194 Commits

SHA1 Message Date
79d3475c74 feat: Clean up root directory and create docs folder 2025-09-04 14:34:49 -05:00
d50a58e934 Revert "docs(jenkins): add Jenkins packaging execution plan"
This reverts commit 33e9a861b0.
2025-09-04 11:40:01 -05:00
33e9a861b0 docs(jenkins): add Jenkins packaging execution plan
- Outlined the detailed plan for packaging Jenkins for Cloudron, including all phases and steps.
- This document will be updated with status and notes throughout the process.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 11:39:49 -05:00
fd910f1a72 docs: add LEARNING.md with development insights
- Initial commit of LEARNING.md to track mistakes and solutions.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 11:27:01 -05:00
d74cdc091b fix(rathole): update package to use correct Cloudron manifest format and fix configuration
- Update CloudronManifest.json to use modern format with proper ID, health check, and metadata
- Fix Dockerfile to follow Cloudron conventions (/app/code, /app/data structure)
- Correct Rathole configuration format (default_token instead of token, add services section)
- Fix start.sh to use proper --server flag syntax
- Add health check endpoint on port 8080
- Create comprehensive build notes documentation
- Successfully build and test package - both ports 2333 (Rathole) and 8080 (health) working

🤖 Generated with assistance from OpenCode for code optimization and testing
2025-09-04 10:12:38 -05:00
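
As a rough sketch of the configuration shape this commit describes (illustrative only: the token variable, service name, and exact paths are assumptions, not the package's actual start.sh):

#!/bin/bash
# Generate the Rathole config with the corrected format: default_token
# (not token) at the server level, plus an explicit services section.
cat > /app/data/rathole.toml <<EOF
[server]
bind_addr = "0.0.0.0:2333"
default_token = "${RATHOLE_TOKEN}"

[server.services.example]
bind_addr = "0.0.0.0:8081"
EOF

# Corrected invocation: the config file is passed via the --server flag.
exec /app/code/rathole --server /app/data/rathole.toml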
4bc1418831 fix(apisix): correct Dockerfile ui copy and RUN syntax (direct commit to integration)
- Removed the COPY instruction for apisix-source/ui/ as it's not part of the core APISIX gateway.
- Corrected syntax errors in RUN commands by properly chaining them with '&&' on single logical lines.
- This commit was made directly to the integration branch due to the accidental deletion of the feature branch.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:55:37 -05:00
48ed02209d docs: update TASKS.md and WORKLOG.md for APISIX package
- Updated progress overview and completed applications in TASKS.md.
- Added new work log entry for APISIX packaging session in WORKLOG.md.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:52:17 -05:00
a2a0f4ef48 fix(apisix): correct Dockerfile RUN command syntax
- Corrected syntax errors in RUN commands by properly chaining them with '&&' on single logical lines.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:48:15 -05:00
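
For context, the failure mode this fixes looks roughly like the following Dockerfile fragment (illustrative, not the exact APISIX lines):

# Broken: the second command is not part of the RUN instruction, so the
# Dockerfile fails to parse ("unknown instruction") or the step never runs.
#   RUN apt-get update
#       apt-get install -y libyaml-0-2
# Fixed: chain the commands with && so they execute as one logical line
# and the build aborts on the first failure.
RUN apt-get update \
 && apt-get install -y libyaml-0-2 \
 && rm -rf /var/lib/apt/lists/*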
54cc5f7308 feat(apisix): add Cloudron package
- Implements Apache APISIX packaging for Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:42:47 -05:00
f7bae09f22 docs: update TASKS.md and WORKLOG.md for Inventree package
- Updated progress overview and completed applications in TASKS.md.
- Added new work log entry for Inventree packaging session in WORKLOG.md.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:27:02 -05:00
0500eb3f54 feat(inventree): add logo and update health check path
- Added logo.png to the package directory.
- Updated healthCheckPath in CloudronManifest.json to /api/generic/status/.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:24:57 -05:00
f5a0c521c5 docs: update TASKS.md and WORKLOG.md for Rathole package
- Updated progress overview and completed applications in TASKS.md.
- Added new work log entry for Rathole packaging session in WORKLOG.md.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:22:31 -05:00
110d22de87 feat(rathole): add CloudronManifest.json and start.sh
- Implements CloudronManifest.json with port and environment variables
- Adds start.sh to generate rathole.toml and start the server

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:07:20 -05:00
030ba67335 feat(rathole): add Cloudron package
- Implements Rathole packaging for Cloudron platform
- Includes Dockerfile for building from source/downloading binary
- Tested with basic build (will be tested with full functionality later)

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:04:51 -05:00
4511311565 docs: update worklog with PR workflow testing
- Updated time investment to reflect additional workflow development
- Added achievements for git workflow and clickable documentation
- Testing end-to-end PR workflow with tea CLI integration
- Preparing for production-ready development process

🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 08:55:39 -05:00
4f71cba131 feat(docs): add clickable file links in README.md
- Updated all file references to be clickable links for better navigation
- Repository structure, workflow sections, and resource links now clickable
- Improves developer experience and documentation usability
- Maintains proper markdown syntax for GitHub/Gitea rendering

🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 08:52:17 -05:00
659640836c docs: add comprehensive AI assistant integration guide
- Create AGENT.md with detailed guide for OpenCode, Gemini CLI, and Claude usage
- Document AI-assisted packaging workflow and best practices
- Include prompt templates and context sharing strategies
- Add symbolic links GEMINI.md and CLAUDE.md for easy access
- Update README.md to reference AI assistant documentation

AI Integration Features:
- Phase-specific assistant recommendations (Research→Gemini, Development→Claude, etc.)
- Template-driven development with AI assistance
- Quality assurance workflows with AI review
- Multi-assistant collaborative approaches

This establishes AI-first development approach for the 56-application packaging initiative,
significantly accelerating development while maintaining quality standards.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 08:45:30 -05:00
a0169a2d8a refactor: migrate from master to main branch + implement PR workflow
- Rename master branch to main for inclusive language
- Update all documentation references from master → main
- Implement PR-based workflow with maintainer approval required
- Document tea CLI usage for Gitea pull requests
- Establish clear branch hierarchy: feature → integration → main

Branch Strategy:
- main: Production packages (requires PR approval)
- integration: Staging area for multiple packages
- feature/package-[name]: Individual package development

Workflow Pattern:
1. Create feature/package-[name] from integration
2. Develop package in feature branch
3. Merge feature → integration (direct merge)
4. Create PR integration → main (requires approval)

This provides proper quality gates while enabling parallel development
of the 56 applications with maintainer oversight.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 08:42:37 -05:00
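
In practice the pattern above maps to roughly these commands (illustrative; the package name is a placeholder, and tea subcommand and flag spellings may vary by version):

# 1. Branch off integration for a new package
git checkout integration && git pull
git checkout -b feature/package-rathole

# 2. After development, merge the feature branch back (direct merge)
git checkout integration
git merge --no-ff feature/package-rathole

# 3. Open the approval-gated PR from integration to main via the tea CLI
tea pulls create --base main --head integration --title "Promote packages to main"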
e61d4eb165 docs: comprehensive documentation and workflow overhaul
Create complete project documentation suite for 56-application Cloudron packaging initiative:

New Documentation Files:
- README.md: Comprehensive project overview with quick start guide
- PLAN.md: Strategic roadmap for packaging across 2025 with 4-phase approach
- TASKS.md: Detailed task list with 56 applications prioritized in 4 tiers
- WORKLOG.md: Progress tracking with daily logs and development insights
- GIT_WORKFLOW.md: Complete branching strategy and commit standards

Enhanced Existing Documentation:
- CloudronPackages/README.md: Enhanced package directory with usage instructions
- CloudronPackagingWorkspace/README.md: Comprehensive workspace development guide

Key Features:
- Established feature → integration → master git workflow
- Containerized development environment with tsys-cloudron-packaging
- 4-tier priority system focusing on business-critical applications first
- Quality standards and testing procedures for all packages
- Team coordination tools for parallel development

This foundation supports systematic packaging of all 56 applications with proper
quality control, progress tracking, and team scalability.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 08:38:35 -05:00
4ef3a47e25 Enhance .gitignore for packaging workflow
- Add patterns for temporary packaging directories
- Include Docker container artifacts exclusion
- Add common OS-generated file patterns
- Maintain existing upstream repo exclusions

This supports the container-based packaging workflow for ~100 Cloudron applications while keeping the repository clean.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-04 07:35:38 -05:00
37d9fae8c9 self hosted patreon.. so excited! 2025-07-10 22:57:12 -05:00
244ac11588 devex cleanup 2025-07-09 11:19:46 -05:00
fd6aa5c0f0 bit of devex cleanup and removed last of the placeholder dirs/files 2025-07-07 17:47:51 -05:00
f512afc53f cleaned up a bunch of placeholders. the tickets are now linked to in the dev setup scripts 2025-07-07 17:21:36 -05:00
b0ca0ef49c Claude super rough first cut of a few packages. Almost certainly entirely unusable... 2025-07-07 17:20:00 -05:00
c315498391 keep the docs in sync... 2025-07-07 17:04:35 -05:00
6e23807a8a refactor.. 2025-07-07 17:03:09 -05:00
2f0fe97933 starting the big push to cloudron all the things we need for COO/CTO orgs 2025-07-07 16:54:32 -05:00
054f6c9e2f and so begins the packaging... 2025-07-07 12:55:40 -05:00
2a26247028 all redmine tickets/repos are now captured 2025-07-07 12:52:25 -05:00
b2cd55b0ca doco.. 2025-07-07 12:39:45 -05:00
60fc1b3aaf packaging all the things 2025-07-07 12:37:29 -05:00
0148db9864 build scripts for the big packaging push of all the apps this month 2025-07-07 12:05:40 -05:00
93775b7375 cleaning up to match redmine milestone. scope of work has changed slightly. 2025-07-07 10:48:05 -05:00
1b5dd39a11 capturing stuff 2025-07-05 19:08:13 -05:00
d51149df29 . 2025-07-04 12:25:57 -05:00
e640d38400 all the ops 2025-05-08 09:52:54 -05:00
da248f87cb phplist 2025-05-07 13:14:52 -05:00
e576d0175f canvas joins the party. 2025-05-07 13:12:17 -05:00
62a0bd3bbc sqlfluff and wireflow 2025-05-07 13:07:59 -05:00
731ac82914 ota and etl 2025-05-07 13:00:44 -05:00
d45e8790d4 vdi 2025-05-06 14:51:35 -05:00
fffcd90d19 windmill 2025-05-06 14:19:57 -05:00
b69527bc7e typo and missed one.. 2025-05-06 12:48:34 -05:00
949bd93dbf doing final review... 2025-05-06 12:31:51 -05:00
bc92e58407 llm-ops 2025-05-06 12:23:55 -05:00
caaedbe8b6 resume... cv... all the things. 2025-05-06 12:21:14 -05:00
2157ed0742 maker and fuzz 2025-05-06 11:47:48 -05:00
aa50363ece 3dprintfarm 2025-05-06 11:34:34 -05:00
90d618f71a autobom and plm 2025-05-06 11:32:13 -05:00
3d1d640641 docassemble 2025-05-06 11:29:02 -05:00
7acf4748f9 wireviz 2025-05-06 11:27:16 -05:00
0564e4250b graylog -> logportal, sentry -> errortrack 2025-05-06 08:58:54 -05:00
b87dbdec81 linked to redmine voting ticket 2025-05-06 08:50:39 -05:00
365d7ddebc . 2025-05-06 08:48:31 -05:00
42cbaa67b9 wazuh -> siem 2025-05-06 08:48:13 -05:00
5fe6a855a9 . 2025-05-06 08:34:50 -05:00
dcbdaf01ae . 2025-05-06 08:20:34 -05:00
32099ee956 . 2025-05-06 08:16:28 -05:00
64b411f768 . 2025-05-06 08:10:17 -05:00
e9f69ae274 more ticket prep 2025-05-06 08:02:36 -05:00
471b7ba296 cleanup for app deployment for ops exit. 2025-05-05 12:55:01 -05:00
aaffec4b47 making all the tickets 2025-05-05 11:56:05 -05:00
113d1cd0fd cleanup 2025-04-21 19:21:06 -05:00
a4db3a38d8 netbird 2025-04-21 17:14:09 -04:00
72cb0122c4 first cut of grist package 2025-04-21 16:31:23 -04:00
f0fa670ac5 first cut of librenms package 2025-04-21 16:23:34 -04:00
24757c5cf5 resgrid package 2025-04-21 16:04:31 -04:00
eea38e1653 first cut of consul democracy package for cloudron 2025-04-21 15:55:31 -04:00
f2230d1663 easy gate package for cloudron 2025-04-21 15:47:19 -04:00
4817710a10 . 2025-04-21 15:41:17 -04:00
c7ddeb4a89 moved cloudron things to cloudron dir 2025-04-21 15:38:36 -04:00
9f74e0fc39 first cut of jenkins package for cloudron 2025-04-21 15:34:14 -04:00
f3a57e5b87 . 2025-04-21 14:33:54 -04:00
f37ea77870 first cut of home chart 2025-04-21 14:30:02 -04:00
34990a9162 first cut of elabftw 2025-04-21 14:18:31 -04:00
2f7d77b3c3 . 2025-04-21 14:12:15 -04:00
5a8a0caba8 . 2025-04-21 14:09:35 -04:00
898ecaaea6 first cut of rundeck packaging 2025-04-21 14:08:44 -04:00
b382498ea8 first cut of homebox cloudron package 2025-04-21 13:44:30 -04:00
f0943949a5 first cut of review board packaging 2025-04-21 13:38:19 -04:00
425a6c01d6 cleanup 2025-04-21 13:29:57 -04:00
f083ee7193 cleanup 2025-04-21 12:26:59 -04:00
9e2cb96841 build notes 2025-04-21 12:26:25 -04:00
0f88372846 try 2 2025-04-21 12:23:35 -04:00
286e946a03 cleanup 2025-04-21 12:21:42 -04:00
d318ed951c first cut of inventree for cloudron 2025-04-21 12:17:52 -04:00
4a0584e2e7 reorg for go live 2025-04-20 15:59:03 -04:00
570d5faa2d prep for capraise 2024-12-08 05:39:00 -06:00
3207bd8a23 more apps 2024-12-06 08:45:44 -06:00
92c835c172 last few apps for deployment 2024-12-05 21:39:35 -06:00
0ae11cac56 . 2024-12-05 19:23:58 -06:00
51b792f948 . 2024-12-05 18:58:36 -06:00
8373549544 Merge branch 'master' of ssh://git.knownelement.com:29418/TechnicalOperations/DockerProduction 2024-12-05 18:50:29 -06:00
a06d5aaf09 reorg 2024-12-05 18:50:23 -06:00
9d7b29d8be Update coolify-techops/postiz.knownelement.com/info 2024-12-05 23:58:51 +00:00
82e91e8ff5 treasury desk
Exploring possible trading desk solutions.
2024-11-29 07:08:17 -05:00
4860c110c3 orchestration 2024-11-28 11:25:02 -05:00
7bc3343183 . 2024-11-28 11:21:34 -05:00
afefac2d5c serverless exploring 2024-11-28 11:17:42 -05:00
b57c994fc2 coolify wins over cosmos 2024-11-28 08:39:31 -05:00
f2fe81c265 cleanup 2024-11-26 11:55:37 -06:00
a5f817a29f supply chain management is critical 2024-11-26 10:00:17 -06:00
98925b457b jamovi
statistics....
2024-11-25 23:13:35 -05:00
7019e08b88 some options emerging for cloud dev environment.
no clear winner as of yet. much more research required.
2024-11-25 21:30:27 -05:00
38fb2a0085 apigw port added 2024-11-25 21:01:00 -05:00
580cde2be7 Merge branch 'master' of ssh://git.knownelement.com:29418/TechnicalOperations/DockerProduction 2024-11-25 19:59:42 -06:00
c6f41ce958 ports to apps 2024-11-25 19:59:34 -06:00
2f796b38df apisix it is 2024-11-25 20:58:48 -05:00
9b885cdabc pimcore 2024-11-25 20:35:44 -05:00
8cac7b6121 pimcore... 2024-11-25 20:34:36 -05:00
5accf8a9a6 social media scheduling 2024-11-25 19:56:27 -05:00
3d859bcf1d deployed to cloudron. 2024-11-25 19:25:36 -05:00
e6734cf308 deployed to cloudron. 2024-11-25 19:10:51 -05:00
d9e3f2814a Merge branch 'master' of ssh://git@git.knownelement.com:29418/TechnicalOperations/DockerProduction.git 2024-11-25 08:17:00 -05:00
b15c4f933c rename 2024-11-25 08:16:48 -05:00
7327fb3c5d important to have talent assessment testing! 2024-11-24 19:08:43 -06:00
1237c53f97 i think that's all the apps (for cosmos anyway). i like having the TBD dir as an inbox/todo kind of spot. 2024-11-24 17:48:44 -06:00
89b85fbc21 few more stragglers 2024-11-24 17:36:26 -06:00
99071a70e1 getting ready to load the compose files and deploy apps for next 7 days. 2024-11-24 17:29:16 -06:00
513c42fac6 kicad ci 2024-11-24 16:04:08 -05:00
2bb4ba2214 have a framework for deployment now. here we go. 2024-11-24 08:31:25 -06:00
8ba85ac07f next week is going to be... packed. so much to deploy! 2024-11-24 00:23:28 -06:00
7950fbf338 Merge branch 'master' of ssh://git.knownelement.com:29418/TechnicalOperations/DockerProduction 2024-11-18 09:20:03 -06:00
d7d768e955 staging for next week (techops pooloza) 2024-11-18 09:17:42 -06:00
fc498b6292 voip is critical as well 2024-11-16 10:14:04 -06:00
f45c33a55b we need voice as a service for a variety of use cases 2024-11-16 10:13:41 -06:00
d898406955 need a voip solution 2024-11-16 07:07:40 -06:00
d9b8038f18 prod continues 2024-10-16 10:52:33 -04:00
219b888fd4 moving things to correct data gravity context 2024-10-14 21:28:31 -04:00
6aa4f69479 in the correct context now 2024-10-14 11:16:48 -04:00
4f0464b122 catching up 2024-10-13 12:59:11 -04:00
e7e241495e prod continues 2024-10-12 17:06:27 -04:00
3a3e103b59 prod begins
November I'll be an AI assisted coding machine. Watch out!
2024-10-12 16:57:33 -04:00
cdeb8b12c6 Merge branch 'master' of ssh://git@git.knownelement.com:29418/VpTechnicalOperations/DockerProduction.git 2024-10-12 14:59:04 -04:00
e55c8d11f8 cleanup 2024-10-12 14:58:51 -04:00
5bb35afd20 bits and bobs 2024-10-12 11:33:35 -04:00
12a6469f32 removing some bits and adding some bits 2024-10-12 10:57:38 -04:00
4394679013 K8S comes for us all 2024-10-12 08:17:28 -04:00
59dde0f00c cloud dev env begins 2024-10-12 08:09:46 -04:00
9b5fc45226 Repo is public now 2024-10-12 07:36:33 -04:00
ae5cba899c little bit more ops stuff 2024-10-12 07:33:52 -04:00
87b48ffd3c the pivot towards CTO begins 2024-10-12 07:19:12 -04:00
8636a334e7 no cloudron magic update stuff here 2024-10-11 18:38:21 -04:00
eb964834d6 got to have grid compute! 2024-10-11 16:25:30 -04:00
d1d370fa85 more stuff in the stack 2024-10-11 13:16:54 -04:00
fe4d2fe842 not needed. cosmos will handle pulls 2024-10-11 13:14:14 -04:00
79573cf439 i think this is everything i've wanted to setup as CIO 2024-10-11 09:26:32 -05:00
03683f97d3 todos are coming back to top of mind :) 2024-10-10 13:20:56 -05:00
71d161f3fb i think this will work... 2024-10-10 13:14:06 -05:00
2d1fad4560 updated to match current reality 2024-10-10 13:07:50 -05:00
dec8f6f269 ideas are flowing freely now! 2024-10-10 13:03:52 -05:00
949d561ebe catching up to current reality 2024-10-10 12:57:12 -05:00
47dc72474e moving stuff off laptop to cloud. yay. 2024-10-10 12:56:23 -05:00
1fa51e787f setting down some roots 2024-10-10 12:55:11 -05:00
51c41ebc50 Merge branch 'master' of ssh://git.knownelement.com:29418/VpTechnicalOperations/DockerProduction 2024-10-10 12:52:11 -05:00
1e433ddc12 cleanup 2024-10-10 12:52:00 -05:00
9f88542a16 Update README.md 2024-06-21 21:11:27 +00:00
9f9952af91 Update README.md 2024-06-21 17:11:10 +00:00
27f53ddade next week is r&d systems deployment week. getting ready. 2024-05-23 09:55:02 -05:00
832882c489 . 2024-05-16 12:19:43 -05:00
24eacad478 spinning up the stack.. 2024-05-07 13:56:21 -05:00
542c74f3db fixed... 2024-05-07 11:02:14 -05:00
8031fbd92c Merge branch 'master' of ssh://git.knownelement.com:29418/VpTechnicalOperations/DockerProduction 2024-05-07 10:58:08 -05:00
0f55dfa6c3 preparing to deploy containers next week 2024-05-07 10:57:18 -05:00
e23c71be94 SRE
more SRE prep
2024-05-06 23:22:53 -05:00
0b20cdeaa3 getting ready
for next week SRE sprint
2024-05-06 23:11:24 -05:00
2a3761a559 refactored to use external files and per host 2024-04-27 14:08:02 -05:00
0b4061a045 consolidate and roll up 2024-04-21 15:46:05 -05:00
79b8dcf774 rollup to new 2024-04-21 14:45:50 -05:00
995536c9fa rollup 2022-10-02 14:22:09 -05:00
923680a8e7 proper bits and bobs 2022-05-15 17:32:29 -05:00
5df88e368e portainer 2022-05-15 16:59:26 -05:00
8a0d77ee49 . 2022-05-15 16:33:34 -05:00
66ab368dd3 meh 2022-05-15 16:32:47 -05:00
2f04d6f234 easy-gate 2022-05-15 16:31:18 -05:00
6c6c1ce160 trying again 2022-05-15 15:32:24 -05:00
866200f5df nginx proxy manager 2022-05-15 14:53:23 -05:00
b4c0b2e613 bw-cli in docker hopefully... 2022-03-14 17:17:36 -05:00
cabcedf88a make the tui work 2022-03-14 16:37:04 -05:00
87a8ac2408 sweet sweet TUI 2022-03-14 16:28:52 -05:00
f2a28633b1 parallel. make the lazy brown fox run fast! 2022-03-14 16:04:26 -05:00
dc6de5a37a containerize all the things 2022-03-14 16:03:01 -05:00
321b44d0df containerize all the things 2022-03-14 16:02:06 -05:00
5617999cef no longer using bunkerized 2022-03-14 13:46:29 -05:00
279cbb934e wireguard container 2022-03-14 13:44:10 -05:00
1ed8838270 automation here we come... 2022-03-14 13:33:59 -05:00
0061ee7f7c update automation 2022-03-14 13:28:51 -05:00
ce6e89cfe6 cleanup and automating updates 2022-03-14 13:24:28 -05:00
62777ea382 setting up to move discourse/pwvault 2022-03-14 13:15:19 -05:00
574cf1223d storage path 2022-02-05 16:49:27 -06:00
fd3c6a2ea0 trying out swag from linuxserver 2022-02-05 08:35:31 -06:00
7152e0c7cd ovh didn't provision dns for sol-calc.com. sigh. 2022-01-28 08:41:31 -06:00
feb1952756 the beginning of the bunkerized journey 2022-01-28 08:36:23 -06:00
1734 changed files with 395372 additions and 237 deletions

.gitignore vendored Normal file

@@ -0,0 +1,19 @@
CloudronPackagingWorkspace/Docker/*
CloudronPackagingWorkspace/NonDocker/*
# Temporary packaging work directories
temp_*
*_package_new/
packaging_temp/
# Docker container artifacts
.dockerignore
# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

CLAUDE.md Symbolic link

@@ -0,0 +1 @@
AGENT.md


@@ -0,0 +1,33 @@
{
  "id": "apisix",
  "title": "Apache APISIX",
  "description": "Apache APISIX is a dynamic, real-time, high-performance API gateway.",
  "tagline": "High-performance API Gateway",
  "icon": "https://cdn.cloudron.io/icons/apisix.svg",
  "main": {
    "type": "docker",
    "image": "cloudron/base:4.2.0",
    "ports": {
      "9080/tcp": "APISIX HTTP/HTTPS Port"
    },
    "healthCheck": {
      "url": "/"
    }
  },
  "manifestVersion": 2,
  "addons": {
    "etcd": {}
  },
  "environment": {
    "APISIX_ETCD_HOST": {
      "type": "string",
      "description": "etcd host for APISIX",
      "required": true
    },
    "APISIX_ETCD_PORT": {
      "type": "string",
      "description": "etcd port for APISIX",
      "required": true
    }
  }
}


@@ -0,0 +1,58 @@
FROM cloudron/base:4.2.0 AS build
ENV DEBIAN_FRONTEND=noninteractive
ENV ENV_INST_LUADIR=/usr/local/apisix
COPY apisix-source /apisix
WORKDIR /apisix
RUN set -x \
&& apt-get -y update --fix-missing \
&& apt-get install -y \
make \
git \
sudo \
libyaml-dev \
libldap2-dev \
&& make deps \
&& mkdir -p ${ENV_INST_LUADIR} \
&& cp -r deps ${ENV_INST_LUADIR} \
&& make install
FROM cloudron/base:4.2.0
# Install the runtime libyaml package
RUN apt-get -y update --fix-missing \
&& apt-get install -y libyaml-0-2 \
&& apt-get remove --purge --auto-remove -y \
&& mkdir -p /usr/local/apisix/ui
COPY --from=build /usr/local/apisix /usr/local/apisix
COPY --from=build /usr/local/openresty /usr/local/openresty
COPY --from=build /usr/bin/apisix /usr/bin/apisix
# Assuming UI files are in apisix-source/ui, adjust if needed
# Install brotli (from upstream install-brotli.sh)
RUN apt-get update && apt-get install -y \
libbrotli-dev \
--no-install-recommends && \
rm -rf /var/lib/apt/lists/*
ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin
WORKDIR /usr/local/apisix
RUN ln -sf /dev/stdout /usr/local/apisix/logs/access.log \
&& ln -sf /dev/stderr /usr/local/apisix/logs/error.log
EXPOSE 9080 9443
# Copy our custom start.sh
COPY start.sh /usr/local/bin/start.sh
RUN chmod +x /usr/local/bin/start.sh
ENTRYPOINT ["/usr/local/bin/start.sh"]
STOPSIGNAL SIGQUIT


@@ -0,0 +1,38 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM ubuntu:24.04
RUN apt update && export DEBIAN_FRONTEND=noninteractive \
&& apt install -y sudo git make gcc tini
COPY Makefile .requirements apisix-master-0.rockspec ./
COPY utils/install-dependencies.sh utils/linux-install-luarocks.sh utils/
RUN make install-runtime
RUN cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1)
ARG ETCD_VER=v3.5.17
ARG BUILDARCH
RUN curl -L https://github.com/etcd-io/etcd/releases/download/${ETCD_VER}/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz \
&& mkdir -p /tmp/etcd-download-test \
&& tar xzvf /tmp/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz -C /tmp/etcd-download-test --strip-components=1 \
&& mv /tmp/etcd-download-test/etcdctl /usr/bin \
&& rm -rf /tmp/*
ENTRYPOINT [ "tini", "--" ]


@@ -0,0 +1,14 @@
{
  "name": "APISIX",
  "dockerComposeFile": ["docker-compose.yml"],
  "service": "apisix",
  "workspaceFolder": "/workspace",
  "privileged": true,
  "postCreateCommand": "bash -c 'cd /workspace && rm -rf test-nginx && git config --global --add safe.directory /workspace && git submodule update --init --recursive && git clone https://github.com/openresty/test-nginx.git --depth 1 --single-branch -b master && make deps'",
  "customizations": {
    "vscode": {
      "extensions": ["ms-vscode.makefile-tools", "ms-azuretools.vscode-docker", "sumneko.lua"]
    }
  },
  "forwardPorts": [9080, 9180, 2379]
}


@@ -0,0 +1,37 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
services:
  apisix:
    build:
      context: ..
      dockerfile: .devcontainer/Dockerfile
    command: sleep infinity
    volumes:
      - ..:/workspace:cached
    network_mode: service:etcd
  etcd:
    image: bitnami/etcd:3.5
    volumes:
      - etcd_data:/bitnami/etcd
    environment:
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_ADVERTISE_CLIENT_URLS: "http://127.0.0.1:2379"
      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
volumes:
  etcd_data:


@@ -0,0 +1,68 @@
name: "Bug Report"
description: Report a bug to help improve the project.
title: "bug: "
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to report this bug!
_The more information you share, the faster we can identify and fix the bug._
Prior to opening the issue, please make sure that you:
- Use English to communicate.
- Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue.
- type: textarea
id: current-behavior
attributes:
label: Current Behavior
description: Describe the issue you are facing.
placeholder: |
What is the issue with the current behavior?
validations:
required: true
- type: textarea
id: expected-behavior
attributes:
label: Expected Behavior
description: Describe what you expected to happen.
placeholder: |
What did you expect to happen instead?
validations:
required: false
- type: textarea
id: error
attributes:
label: Error Logs
description: Paste the error logs if any. You can change the [log level](https://github.com/apache/apisix/blob/617c325628f33961be67f61f0fa8002afc370e42/docs/en/latest/FAQ.md#how-to-change-the-log-level) to get a verbose error log.
validations:
required: false
- type: textarea
id: steps
attributes:
label: Steps to Reproduce
description: Share the steps you took so that we can reproduce the issue. Reports without proper steps details will likely be closed.
placeholder: |
1. Run APISIX via the Docker image.
2. Create a Route with the Admin API.
3. Try configuring ...
4. ...
validations:
required: true
- type: textarea
id: environment
attributes:
label: Environment
description: Share your environment details. Reports without proper environment details will likely be closed.
value: |
- APISIX version (run `apisix version`):
- Operating system (run `uname -a`):
- OpenResty / Nginx version (run `openresty -V` or `nginx -V`):
- etcd version, if relevant (run `curl http://127.0.0.1:9090/v1/server_info`):
- APISIX Dashboard version, if relevant:
- Plugin runner version, for issues related to plugin runners:
- LuaRocks version, for installation issues (run `luarocks --version`):
validations:
required: true


@@ -0,0 +1,5 @@
blank_issues_enabled: false
contact_links:
  - name: APISIX Discussion Forum
    url: https://github.com/apache/apisix/discussions
    about: Please ask and answer questions here.


@@ -0,0 +1,23 @@
name: "Feature Request"
description: Suggest an enhancement to APISIX.
title: "feat: As a user, I want to ..., so that ..."
body:
- type: markdown
attributes:
value: |
_The more information you share, the faster we can help you._
Prior to opening the issue, please make sure that you:
- Use English to communicate.
- Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue.
- type: textarea
id: description
attributes:
label: Description
description: Describe the feature you would like to see.
placeholder: |
As a user, I want to ..., so that...
validations:
required: true


@@ -0,0 +1,33 @@
name: "Documentation Issue"
description: Issues related to documentation.
title: "docs: "
labels: [doc]
body:
- type: markdown
attributes:
value: |
_The more information you share, the faster we can help you._
Prior to opening the issue, please make sure that you:
- Use English to communicate.
- Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue.
- type: textarea
id: current-state
attributes:
label: Current State
description: Describe the current state of the documentation.
placeholder: |
The documentation for the API in this page (url) is missing ...
validations:
required: true
- type: textarea
id: desired-state
attributes:
label: Desired State
description: Describe the desired state the documentation should be in.
placeholder: |
There should be a line mentioning how the API behaves when ...
validations:
required: true


@@ -0,0 +1,36 @@
name: "Request Help"
description: Stuck? Ask for help!
title: "help request: "
body:
- type: markdown
attributes:
value: |
_The more information you share, the faster we can help you._
Prior to opening the issue, please make sure that you:
- Use English to communicate.
- Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue.
- type: textarea
id: description
attributes:
label: Description
description: Describe the issue you are facing and what you need help with.
validations:
required: true
- type: textarea
id: environment
attributes:
label: Environment
description: Share your environment details. Reports without proper environment details will likely be closed.
value: |
- APISIX version (run `apisix version`):
- Operating system (run `uname -a`):
- OpenResty / Nginx version (run `openresty -V` or `nginx -V`):
- etcd version, if relevant (run `curl http://127.0.0.1:9090/v1/server_info`):
- APISIX Dashboard version, if relevant:
- Plugin runner version, for issues related to plugin runners:
- LuaRocks version, for installation issues (run `luarocks --version`):
validations:
required: true


@@ -0,0 +1,33 @@
### Description
<!-- Please include a summary of the change and which issue is fixed. -->
<!-- Please also include relevant motivation and context. -->
#### Which issue(s) this PR fixes:
<!--
*Automatically closes linked issue when PR is merged.
Usage: `Fixes #<issue number>`, or `Fixes (paste link of issue)`.
-->
Fixes #
### Checklist
- [ ] I have explained the need for this PR and the problem it solves
- [ ] I have explained the changes or the new features added to this PR
- [ ] I have added tests corresponding to this change
- [ ] I have updated the documentation to reflect this change
- [ ] I have verified that this change is backward compatible (If not, please discuss on the [APISIX mailing list](https://github.com/apache/apisix/tree/master#community) first)
<!--
Note
1. Mark the PR as draft until it's ready to be reviewed.
2. Always add/update tests for any changes unless you have a good reason.
3. Always update the documentation to reflect the changes made in the PR.
4. Make a new commit to resolve conversations instead of `push -f`.
5. To resolve merge conflicts, merge master instead of rebasing.
6. Use "request review" to notify the reviewer after making changes.
7. Only a reviewer can mark a conversation as resolved.
-->


@@ -0,0 +1,10 @@
# Set update schedule for GitHub Actions
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every weekday
      interval: "daily"


@@ -0,0 +1,177 @@
name: CI
on:
push:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_name:
- linux_openresty
events_module:
- lua-resty-worker-events
- lua-resty-events
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/http3/admin t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
timeout-minutes: 90
env:
SERVER_NAME: ${{ matrix.os_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.17"
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Extract test type
shell: bash
id: test_env
run: |
test_dir="${{ matrix.test_dir }}"
if [[ $test_dir =~ 't/plugin' ]]; then
echo "type=plugin" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ 't/admin ' ]]; then
echo "type=first" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ ' t/xrpc' ]]; then
echo "type=last" >>$GITHUB_OUTPUT
fi
- name: Free disk space
run: |
bash ./ci/free_disk_space.sh
- name: Linux Before install
run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
- name: Linux Install
run: |
sudo --preserve-env=OPENRESTY_VERSION \
./ci/${{ matrix.os_name }}_runner.sh do_install
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
sudo ./ci/init-common-test-service.sh
- name: Cache images
id: cache-images
uses: actions/cache@v4
env:
cache-name: cache-apisix-docker-images
with:
path: docker-images-backup
key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
- if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
name: Load saved docker images
run: |
if [[ -f docker-images-backup/apisix-images.tar ]]; then
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
docker load --input docker-images-backup/apisix-images.tar
echo "loaded docker images"
# preserve storage space
rm docker-images-backup/apisix-images.tar
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
fi
fi
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Linux launch services
run: |
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last')
run: |
cur_dir=$(pwd)
sudo apt update
sudo apt install -y maven openjdk-8-jdk
sudo update-java-alternatives --set java-1.8.0-openjdk-amd64
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$JAVA_HOME/bin:$PATH
cd t/lib/dubbo-backend
mvn package
cd dubbo-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java.log &
cd $cur_dir/t/lib/dubbo-serialization-backend
mvn package
cd dubbo-serialization-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log &
- name: Build xDS library
if: steps.test_env.outputs.type == 'last'
run: |
cd t/xds-library
go build -o libxds.so -buildmode=c-shared main.go export.go
- name: Build wasm code
if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'last'
run: |
export TINYGO_VER=0.20.0
wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null
sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb
cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p
- name: Linux Script
env:
TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
TEST_EVENTS_MODULE: ${{ matrix.events_module }}
run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Save docker images
run: |
echo "start backing up, $(date)"
bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
echo "backup done, $(date)"


@@ -0,0 +1,27 @@
name: Check Changelog
on:
push:
paths:
- 'CHANGELOG.md'
- 'ci/check_changelog_prs.ts'
pull_request:
paths:
- 'CHANGELOG.md'
- 'ci/check_changelog_prs.ts'
jobs:
check-changelog:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run check_changelog_prs script
working-directory: ci
run: |
curl -fsSL https://bun.sh/install | bash
export PATH="$HOME/.bun/bin:$PATH"
bun run check_changelog_prs.ts


@@ -0,0 +1,68 @@
name: CLI Test
on:
push:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
job_name:
- linux_apisix_current_luarocks
- linux_apisix_current_luarocks_in_customed_nginx
runs-on: ${{ matrix.platform }}
timeout-minutes: 30
env:
SERVER_NAME: ${{ matrix.job_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.job_name }}-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Linux launch common services
run: |
project_compose_ci=ci/pod/docker-compose.common.yml make ci-env-up
- name: Linux Before install
run: sudo ./ci/${{ matrix.job_name }}_runner.sh before_install
- name: Linux Install
run: |
sudo --preserve-env=OPENRESTY_VERSION \
./ci/${{ matrix.job_name }}_runner.sh do_install
- name: Linux Script
run: |
sudo chmod +x /home/runner
sudo ./ci/${{ matrix.job_name }}_runner.sh script


@@ -0,0 +1,39 @@
name: Check Issues
on:
workflow_dispatch:
schedule:
- cron: '0 10 * * *'
permissions:
contents: read
jobs:
prune_stale:
permissions:
issues: write # for actions/stale to close stale issues
name: Prune Unresponded
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Prune Stale
uses: actions/stale@v8
with:
days-before-issue-stale: 60
days-before-issue-close: 3
stale-issue-message: >
Due to lack of the reporter's response this issue has been labeled with "no response".
It will be closed in 3 days if no further activity occurs. If this issue is still
relevant, please simply write any comment. Even if closed, you can still revive the
issue at any time or discuss it on the dev@apisix.apache.org list.
Thank you for your contributions.
close-issue-message: >
This issue has been closed due to lack of activity. If you think that
is incorrect, or the issue requires additional review, you can revive the issue at
any time.
# Issues with these labels will never be considered stale.
only-labels: 'wait for update'
stale-issue-label: 'no response'
exempt-issue-labels: "don't close"
ascending: true


@@ -0,0 +1,48 @@
name: Code Lint
on:
pull_request:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
permissions:
contents: read
jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- uses: actions/checkout@v4
- name: Install
run: |
. ./ci/common.sh
export_or_prefix
export OPENRESTY_VERSION=default
sudo -E ./ci/linux-install-openresty.sh
./utils/linux-install-luarocks.sh
sudo -E luarocks install luacheck
- name: Script
run: |
. ./ci/common.sh
export_or_prefix
make lint
sc-lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Shellcheck code
run: |
scversion="latest"
wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv
cp -av "shellcheck-${scversion}/shellcheck" /usr/local/bin/
shellcheck --version
git ls-files -- "*.sh" | xargs -t shellcheck


@@ -0,0 +1,58 @@
name: Doc Lint
on:
push:
paths:
- "docs/**"
- "**/*.md"
- ".github/workflows/doc-lint.yml"
pull_request:
branches: [master, "release/**"]
paths:
- "docs/**"
- "**/*.md"
- ".github/workflows/doc-lint.yml"
permissions:
contents: read
jobs:
markdownlint:
name: 🍇 Markdown
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- uses: actions/checkout@v4
- name: 🚀 Use Node.js
uses: actions/setup-node@v4.4.0
with:
node-version: "12.x"
- run: npm install -g markdownlint-cli@0.25.0
- run: markdownlint '**/*.md'
- name: check category
run: |
./utils/check-category.py
- name: check Chinese doc
run: |
sudo pip3 install zhon
./utils/fix-zh-doc-segment.py > \
/tmp/check.log 2>&1 || (cat /tmp/check.log && exit 1)
if grep "find broken newline in file: " /tmp/check.log; then
cat /tmp/check.log
echo "Newline can't appear in the middle of Chinese sentences."
echo "You need to run ./utils/fix-zh-doc-segment.py to fix them."
exit 1
fi
Chinse-Copywriting-lint:
name: Chinese Copywriting
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Check Chinese copywriting
uses: ./.github/actions/autocorrect
with:
args: autocorrect --lint --no-diff-bg-color ./docs/zh/latest/


@@ -0,0 +1,182 @@
name: CI GM (cron)
on:
schedule:
# UTC 7:30 every Friday
- cron: "30 7 * * 5"
permissions:
contents: read
jobs:
build:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_name:
- linux_openresty_tongsuo
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
timeout-minutes: 90
env:
SERVER_NAME: ${{ matrix.os_name }}
OPENRESTY_VERSION: default
# TODO: refactor the workflows to reduce duplicate parts. Maybe we can write them in shell
# scripts or a separate action?
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.17"
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Cache Tongsuo compilation
id: cache-tongsuo
uses: actions/cache@v4
env:
cache-name: cache-tongsuo
with:
path: ./tongsuo
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-tongsuo-ver
- name: Test SSL Env
id: test_ssl_env
shell: bash
if: steps.cache-tongsuo.outputs.cache-hit != 'true'
run: |
echo "compile_tongsuo=true" >>$GITHUB_OUTPUT
- name: Extract test type
shell: bash
id: test_env
run: |
test_dir="${{ matrix.test_dir }}"
if [[ $test_dir =~ 't/plugin' ]]; then
echo "type=plugin" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ 't/admin ' ]]; then
echo "type=first" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ ' t/xrpc' ]]; then
echo "type=last" >>$GITHUB_OUTPUT
fi
- name: Free disk space
run: |
bash ./ci/free_disk_space.sh
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
sudo ./ci/init-common-test-service.sh
- name: Cache images
id: cache-images
uses: actions/cache@v4
env:
cache-name: cache-apisix-docker-images
with:
path: docker-images-backup
key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
- if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
name: Load saved docker images
run: |
if [[ -f docker-images-backup/apisix-images.tar ]]; then
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
docker load --input docker-images-backup/apisix-images.tar
rm docker-images-backup/apisix-images.tar
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
echo "loaded docker images"
if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
fi
fi
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Linux launch services
run: |
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
if: steps.test_env.outputs.type == 'plugin'
run: |
cur_dir=$(pwd)
sudo apt update
sudo apt install -y maven openjdk-8-jdk
sudo update-java-alternatives --set java-1.8.0-openjdk-amd64
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export PATH=$JAVA_HOME/bin:$PATH
cd t/lib/dubbo-backend
mvn package
cd dubbo-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java.log &
cd $cur_dir/t/lib/dubbo-serialization-backend
mvn package
cd dubbo-serialization-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log &
- name: Build xDS library
if: steps.test_env.outputs.type == 'last'
run: |
cd t/xds-library
go build -o libxds.so -buildmode=c-shared main.go export.go
- name: Build wasm code
if: steps.test_env.outputs.type == 'last'
run: |
export TINYGO_VER=0.20.0
wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null
sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb
cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p
- name: Linux Before install
env:
COMPILE_TONGSUO: ${{ steps.test_ssl_env.outputs.compile_tongsuo }}
run: |
sudo --preserve-env=COMPILE_TONGSUO \
./ci/${{ matrix.os_name }}_runner.sh before_install
- name: Linux Install
run: |
sudo --preserve-env=OPENRESTY_VERSION \
./ci/${{ matrix.os_name }}_runner.sh do_install
- name: Linux Script
env:
TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Save docker images
run: |
echo "start backing up, $(date)"
bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
echo "backup done, $(date)"


@@ -0,0 +1,93 @@
name: CI GM
on:
push:
branches: [master]
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [master]
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
build:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_name:
- linux_openresty_tongsuo
test_dir:
- t/gm
runs-on: ${{ matrix.platform }}
timeout-minutes: 90
env:
SERVER_NAME: ${{ matrix.os_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Cache Tongsuo compilation
id: cache-tongsuo
uses: actions/cache@v4
env:
cache-name: cache-tongsuo
with:
path: ./tongsuo
# TODO: use a fixed release once they have created one.
# See https://github.com/Tongsuo-Project/Tongsuo/issues/318
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-tongsuo-ver
- name: Test SSL Env
id: test_ssl_env
shell: bash
if: steps.cache-tongsuo.outputs.cache-hit != 'true'
run: |
echo "compile_tongsuo=true" >>$GITHUB_OUTPUT
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
sudo ./ci/init-common-test-service.sh
- name: Linux Before install
env:
COMPILE_TONGSUO: ${{ steps.test_ssl_env.outputs.compile_tongsuo }}
run: |
sudo --preserve-env=COMPILE_TONGSUO \
./ci/${{ matrix.os_name }}_runner.sh before_install
- name: Linux Do install
run: |
sudo --preserve-env=OPENRESTY_VERSION \
./ci/${{ matrix.os_name }}_runner.sh do_install
- name: Linux Script
env:
TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
run: |
sudo -E ./ci/${{ matrix.os_name }}_runner.sh script


@@ -0,0 +1,79 @@
name: CI Kubernetes
on:
push:
branches: [ master, 'release/**' ]
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [ master, 'release/**' ]
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
kubernetes-discovery:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_name:
- linux_openresty
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
env:
SERVER_NAME: ${{ matrix.os_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup kubernetes cluster
run: |
KUBERNETES_VERSION="v1.22.7"
kind create cluster --name apisix-test --config ./t/kubernetes/configs/kind.yaml --image kindest/node:${KUBERNETES_VERSION}
kubectl wait --for=condition=Ready nodes --all --timeout=180s
kubectl apply -f ./t/kubernetes/configs/account.yaml
kubectl apply -f ./t/kubernetes/configs/endpoint.yaml
KUBERNETES_CLIENT_TOKEN_CONTENT=$(kubectl get secrets | grep apisix-test | awk '{system("kubectl get secret -o jsonpath={.data.token} "$1" | base64 --decode")}')
KUBERNETES_CLIENT_TOKEN_DIR="/tmp/var/run/secrets/kubernetes.io/serviceaccount"
KUBERNETES_CLIENT_TOKEN_FILE=${KUBERNETES_CLIENT_TOKEN_DIR}/token
mkdir -p ${KUBERNETES_CLIENT_TOKEN_DIR}
echo -n "$KUBERNETES_CLIENT_TOKEN_CONTENT" > ${KUBERNETES_CLIENT_TOKEN_FILE}
echo 'KUBERNETES_SERVICE_HOST=127.0.0.1'
echo 'KUBERNETES_SERVICE_PORT=6443'
echo 'KUBERNETES_CLIENT_TOKEN='"${KUBERNETES_CLIENT_TOKEN_CONTENT}"
echo 'KUBERNETES_CLIENT_TOKEN_FILE='${KUBERNETES_CLIENT_TOKEN_FILE}
kubectl proxy -p 6445 &
- name: Linux Install
run: |
sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
sudo --preserve-env=OPENRESTY_VERSION ./ci/${{ matrix.os_name }}_runner.sh do_install
- name: Run test cases
run: |
./ci/kubernetes-ci.sh run_case


@@ -0,0 +1,37 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
name: License checker
on:
push:
branches: [master, 'release/**']
pull_request:
branches: [master, 'release/**']
jobs:
check-license:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: actions/checkout@v4
- name: Check License Header
uses: apache/skywalking-eyes@v0.6.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -0,0 +1,49 @@
name: 'Link Checker'
# **What it does**: Renders the content of every page and checks all internal links.
# **Why we have it**: To make sure all links connect correctly.
# **Who does it impact**: Docs content.
on:
workflow_dispatch:
push:
# branches: [master, 'release/**']
paths:
- '**/*.md'
- '**/link-check.yml'
pull_request:
branches: [master, "release/**"]
paths:
- '**/*.md'
- '**/link-check.yml'
permissions:
contents: read
# Needed for the 'trilom/file-changes-action' action
pull-requests: read
# This allows a subsequently queued workflow run to interrupt previous runs
concurrency:
group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}'
cancel-in-progress: true
jobs:
check-links:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get script
run: |
wget https://raw.githubusercontent.com/xuruidong/markdown-link-checker/main/link_checker.py
- name: Setup python
uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Link check (critical, all files)
run: |
# python link_checker.py ./ --enable-external --ignore "http://apisix.iresty.com" "https://www.upyun.com" "https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg" "https://httpbin.org/" "https://en.wikipedia.org/wiki/Cache"
python link_checker.py ./


@@ -0,0 +1,46 @@
name: ❄️ Lint
on: [push, pull_request]
permissions:
contents: read
jobs:
misc:
name: misc checker
runs-on: ubuntu-latest
steps:
- name: Check out code.
uses: actions/checkout@v4
- name: spell check
run: |
pip install codespell==2.1.0
# codespell considers some repo name in go.sum are misspelled
git grep --cached -l '' | grep -v go.sum | grep -v pnpm-lock.yaml |xargs codespell --ignore-words=.ignore_words --skip="*.ts,*.mts"
- name: Merge conflict
run: |
bash ./utils/check-merge-conflict.sh
- name: Plugin Code
run: |
bash ./utils/check-plugins-code.sh
ci-eclint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Check out code
uses: actions/checkout@v4
- name: Setup Nodejs env
uses: actions/setup-node@v4.4.0
with:
node-version: '12'
- name: Install eclint
run: |
sudo npm install -g eclint
- name: Run eclint
run: |
eclint check


@@ -0,0 +1,109 @@
name: Build and Push `apisix:dev` to DockerHub on Commit
on:
pull_request:
paths-ignore:
- "docs/**"
- "**/*.md"
push:
paths-ignore:
- "docs/**"
- "**/*.md"
workflow_dispatch:
jobs:
build-test-push:
strategy:
matrix:
include:
- runner: ubuntu-24.04
arch: amd64
- runner: ubuntu-24.04-arm
arch: arm64
runs-on: ${{ matrix.runner }}
env:
APISIX_DOCKER_TAG: master-debian-dev
ENV_OS_ARCH: ${{ matrix.arch }}
DOCKER_BUILDKIT: 1
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Build APISIX Dashboard
run: |
# install node.js and pnpm
sudo n lts
corepack enable pnpm
# prepare apisix-dashboard source code
source .requirements
git clone --revision=${APISIX_DASHBOARD_COMMIT} --depth 1 https://github.com/apache/apisix-dashboard.git
pushd apisix-dashboard
# compile
pnpm install --frozen-lockfile
pnpm run build
popd
# copy the dist files to the ui directory
mkdir ui
cp -r apisix-dashboard/dist/* ui/
rm -r apisix-dashboard
- name: Build and run
run: |
make build-on-debian-dev
docker compose -f ./docker/compose/docker-compose-master.yaml up -d
sleep 30
docker logs compose-apisix-1
- name: Test APISIX
run: |
curl http://127.0.0.1:9180/apisix/admin/routes/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
"uri": "/get",
"upstream": {
"type": "roundrobin",
"nodes": { "httpbin.org:80": 1 }
}
}'
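# the route is created above; probe it through the gateway and require HTTP 200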
result_code=$(curl -I -m 10 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/get)
if [[ $result_code -ne 200 ]]; then
printf "result_code: %s\n" "$result_code"
exit 125
fi
- name: Login to Docker Hub
if: github.ref == 'refs/heads/master'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Push apisix image to Docker Hub
if: github.ref == 'refs/heads/master'
run: |
make push-on-debian-dev
merge-tags:
needs: build-test-push
if: github.ref == 'refs/heads/master'
runs-on: ubuntu-latest
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Merge architecture-specific tags
run: |
make merge-dev-tags


@@ -0,0 +1,179 @@
name: CI Redhat UBI - Daily
on:
schedule:
- cron: "0 0 * * *"
pull_request:
branches: [master]
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
test_apisix:
name: run ci on redhat ubi
runs-on: ubuntu-latest
timeout-minutes: 90
strategy:
fail-fast: false
matrix:
events_module:
- lua-resty-worker-events
- lua-resty-events
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/xds-library
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-ubi8.6-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Extract branch name
if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
id: branch_env
shell: bash
run: |
echo "version=${GITHUB_REF##*/}" >>$GITHUB_OUTPUT
- name: Extract test type
shell: bash
id: test_env
run: |
test_dir="${{ matrix.test_dir }}"
if [[ $test_dir =~ 't/plugin' ]]; then
echo "type=plugin" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ 't/admin ' ]]; then
echo "type=first" >>$GITHUB_OUTPUT
fi
if [[ $test_dir =~ ' t/xds-library' ]]; then
echo "type=last" >>$GITHUB_OUTPUT
fi
- name: Free disk space
run: |
bash ./ci/free_disk_space.sh
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
sudo ./ci/init-common-test-service.sh
- name: Build rpm package
if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
run: |
export VERSION=${{ steps.branch_env.outputs.version }}
sudo gem install --no-document fpm
git clone --depth 1 https://github.com/api7/apisix-build-tools.git
# move the code under the build tool
mkdir ./apisix-build-tools/apisix
for dir in `ls|grep -v "^apisix-build-tools$"`;do cp -r $dir ./apisix-build-tools/apisix/;done
cd apisix-build-tools
make package type=rpm app=apisix version=${VERSION} checkout=release/${VERSION} image_base=ubi image_tag=8.6 local_code_path=./apisix
cd ..
rm -rf $(ls -1 -I apisix-build-tools -I t -I utils -I ci --ignore=Makefile -I "*.rockspec")
- name: Start Dubbo Backend
run: |
cur_dir=$(pwd)
sudo apt update
sudo apt install -y maven
cd t/lib/dubbo-backend
mvn package
cd dubbo-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java.log &
cd $cur_dir/t/lib/dubbo-serialization-backend
mvn package
cd dubbo-serialization-backend-provider/target
java \
-Djava.net.preferIPv4Stack=true \
-jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log &
- name: Build xDS library
if: steps.test_env.outputs.type == 'last'
run: |
cd t/xds-library
go build -o libxds.so -buildmode=c-shared main.go export.go
- name: Run redhat docker and mapping apisix into container
env:
TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
TEST_EVENTS_MODULE: ${{ matrix.events_module }}
run: |
docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --env TEST_EVENTS_MODULE="$TEST_EVENTS_MODULE" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash
- name: Cache images
id: cache-images
uses: actions/cache@v4
env:
cache-name: cache-apisix-docker-images
with:
path: docker-images-backup
key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
- if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
name: Load saved docker images
run: |
if [[ -f docker-images-backup/apisix-images.tar ]]; then
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
docker load --input docker-images-backup/apisix-images.tar
rm docker-images-backup/apisix-images.tar
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
echo "loaded docker images"
if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
fi
fi
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Linux launch services
run: |
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
[[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Install dependencies
run: |
docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh install_dependencies"
- name: Install rpm package
if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
run: |
docker exec ubiInstance bash -c "cd apisix && rpm -iv --prefix=/apisix ./apisix-build-tools/output/apisix-${{ steps.branch_env.outputs.version }}-0.ubi8.6.x86_64.rpm"
# Dependencies are attached with rpm, so revert `make deps`
docker exec ubiInstance bash -c "cd apisix && rm -rf deps"
docker exec ubiInstance bash -c "cd apisix && mv usr/bin . && mv usr/local/apisix/* ."
- name: Run test cases
run: |
docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh run_case"
- if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
name: Save docker images
run: |
echo "start backing up, $(date)"
bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
echo "backup done, $(date)"


@@ -0,0 +1,35 @@
name: "PR Lint"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
steps:
- name: Check out repository code
uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/action-semantic-pull-request
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
types: |
feat
fix
docs
style
refactor
perf
test
build
ci
chore
revert
change


@@ -0,0 +1,124 @@
name: Source Code Install
on:
push:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [master, 'release/**']
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
install-on-multi-platform:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_platform:
- ubuntu
- redhat
services:
etcd:
image: bitnami/etcd:3.5.4
ports:
- 2379:2379
- 2380:2380
env:
ALLOW_NONE_AUTHENTICATION: yes
ETCD_ADVERTISE_CLIENT_URLS: http://0.0.0.0:2379
httpbin:
image: kennethreitz/httpbin
ports:
- 8088:80
runs-on: ${{ matrix.platform }}
timeout-minutes: 30
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Cache deps
uses: actions/cache@v4
env:
cache-name: cache-deps
with:
path: deps
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_platform }}-${{ hashFiles('apisix-master-0.rockspec') }}
- name: Install and start apisix on ${{ matrix.os_platform }}
env:
INSTALL_PLATFORM: ${{ matrix.os_platform }}
run: |
if [[ $INSTALL_PLATFORM == "ubuntu" ]]; then
sudo apt-get update
sudo apt-get install -y git sudo make
make deps
sudo make install
apisix start
elif [[ $INSTALL_PLATFORM == "redhat" ]]; then
docker run -itd -v ${{ github.workspace }}:/apisix --name ubi8 --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash
docker exec ubi8 bash -c "yum install -y git sudo make"
docker exec ubi8 bash -c "cd apisix && make deps"
docker exec ubi8 bash -c "cd apisix && make install"
docker exec ubi8 bash -c "cd apisix && apisix start"
elif [[ $INSTALL_PLATFORM == "centos7" ]]; then
docker run -itd -v ${{ github.workspace }}:/apisix --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
docker exec centos7Instance bash -c "yum install -y git sudo make"
docker exec centos7Instance bash -c "cd apisix && make deps"
docker exec centos7Instance bash -c "cd apisix && make install"
docker exec centos7Instance bash -c "cd apisix && apisix start"
fi
sleep 6
- name: Test apisix
run: |
wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
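# read the auto-generated admin API key from conf/config.yaml using yq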
get_admin_key() {
local admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml)
echo "$admin_key"
}
export admin_key=$(get_admin_key); echo $admin_key
cat conf/config.yaml
curl -v http://127.0.0.1:9180/apisix/admin/routes/1 \
-H "X-API-KEY: $admin_key" -X PUT -d '
{
"uri": "/get",
"upstream": {
"type": "roundrobin",
"nodes": {
"127.0.0.1:8088": 1
}
}
}'
result_code=`curl -I -m 10 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/get`
if [[ $result_code -ne 200 ]]; then
printf "result_code: %s\n" "$result_code"
echo "===============access.log==============="
cat logs/access.log
echo "===============error.log==============="
cat logs/error.log
exit 125
fi
- name: Check error log
run: |
if grep -q '\[error\]' logs/error.log; then
echo "=====found error log====="
cat /usr/local/apisix/logs/error.log
exit 125
fi


@@ -0,0 +1,52 @@
name: Stable Test
on:
workflow_dispatch:
schedule:
- cron: '0 10 * * *'
permissions:
contents: read
jobs:
prune_stale:
permissions:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
name: Prune Stale
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Prune Stale
uses: actions/stale@v8
with:
days-before-issue-stale: 350
days-before-issue-close: 14
stale-issue-message: >
This issue has been marked as stale due to 350 days of inactivity.
It will be closed in 2 weeks if no further activity occurs. If this issue is still
relevant, please simply write any comment. Even if closed, you can still revive the
issue at any time or discuss it on the dev@apisix.apache.org list.
Thank you for your contributions.
close-issue-message: >
This issue has been closed due to lack of activity. If you think that
is incorrect, or the issue requires additional review, you can revive the issue at
any time.
days-before-pr-stale: 60
days-before-pr-close: 28
stale-pr-message: >
This pull request has been marked as stale due to 60 days of inactivity.
It will be closed in 4 weeks if no further activity occurs. If you think
that's incorrect or this pull request should instead be reviewed, please simply
write any comment. Even if closed, you can still revive the PR at any time or
discuss it on the dev@apisix.apache.org list.
Thank you for your contributions.
close-pr-message: >
This pull request/issue has been closed due to lack of activity. If you think that
is incorrect, or the pull request requires review, you can revive the PR at any time.
# Issues with these labels will never be considered stale.
exempt-issue-labels: 'bug,enhancement,good first issue'
stale-issue-label: 'stale'
stale-pr-label: 'stale'
ascending: true


@@ -0,0 +1,55 @@
name: CI Tars
on:
push:
branches: [ master, 'release/**' ]
paths-ignore:
- 'docs/**'
- '**/*.md'
pull_request:
branches: [ master, 'release/**' ]
paths-ignore:
- 'docs/**'
- '**/*.md'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
tars:
strategy:
fail-fast: false
matrix:
platform:
- ubuntu-latest
os_name:
- linux_openresty
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
env:
SERVER_NAME: ${{ matrix.os_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup Tars MySql
run: |
docker run -d -p 3306:3306 -v $PWD/t/tars/conf/tars.sql:/docker-entrypoint-initdb.d/tars.sql -e MYSQL_ROOT_PASSWORD=tars2022 mysql:5.7
- name: Linux Install
run: |
sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
sudo --preserve-env=OPENRESTY_VERSION ./ci/${{ matrix.os_name }}_runner.sh do_install
- name: Run test cases
run: |
./ci/tars-ci.sh run_case


@@ -0,0 +1,62 @@
name: Update labels when user responds in issue and pr
permissions:
issues: write
pull-requests: write
on:
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
jobs:
issue_commented:
if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded')
runs-on: ubuntu-latest
steps:
- name: update labels when user responds
uses: actions/github-script@v7
with:
script: |
github.rest.issues.addLabels({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
labels: ["user responded"]
})
github.rest.issues.removeLabel({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
name: "wait for update"
})
pr_commented:
if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.issue.labels.*.name, 'user responded')
runs-on: ubuntu-latest
steps:
- name: update label when user responds
uses: actions/github-script@v7
with:
script: |
github.rest.issues.addLabels({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
labels: ["user responded"]
})
pr_review_commented:
if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded')
runs-on: ubuntu-latest
steps:
- name: update label when user responds
uses: actions/github-script@v7
with:
script: |
github.rest.issues.addLabels({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
labels: ["user responded"]
})


@@ -0,0 +1,9 @@
[submodule "t/toolkit"]
path = t/toolkit
url = https://github.com/api7/test-toolkit.git
[submodule ".github/actions/action-semantic-pull-request"]
path = .github/actions/action-semantic-pull-request
url = https://github.com/amannn/action-semantic-pull-request.git
[submodule ".github/actions/autocorrect"]
path = .github/actions/autocorrect
url = https://github.com/huacnlee/autocorrect.git


@@ -0,0 +1,11 @@
iam
te
ba
ue
shttp
nd
hel
nulll
smove
aks
nin


@@ -0,0 +1,60 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
header:
license:
spdx-id: Apache-2.0
copyright-owner: Apache Software Foundation
license-location-threshold: 360
paths-ignore:
- '.gitignore'
- '.gitattributes'
- '.gitmodules'
- 'LICENSE'
- 'NOTICE'
- '**/*.json'
- '**/*.key'
- '**/*.crt'
- '**/*.pem'
- '**/*.pb.go'
- '**/pnpm-lock.yaml'
- '.github/'
- 'conf/mime.types'
- '**/*.svg'
# Exclude CI env_file
- 'ci/pod/**/*.env'
# eyes has some limitation to handle git pattern
- '**/*.log'
# Exclude test toolkit files
- 't/toolkit'
- 'go.mod'
- 'go.sum'
# Exclude non-Apache licensed files
- 'apisix/balancer/ewma.lua'
# Exclude plugin-specific configuration files
- 't/plugin/authz-casbin'
- 't/coredns'
- 't/fuzzing/requirements.txt'
- 'autodocs/'
- 'docs/**/*.md'
- '.ignore_words'
- '.luacheckrc'
# Exclude file contains certificate revocation information
- 't/certs/ocsp/index.txt'
comment: on-failure


@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MD001: false
MD004: false
MD005: false
MD006: false
MD007: false
MD010: false
MD013: false
MD014: false
MD024: false
MD026: false
MD029: false
MD033: false
MD034: false
MD036: false
MD040: false
MD041: false
MD046: false

File diff suppressed because it is too large


@@ -0,0 +1,121 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
*The following is copied for your convenience from <https://www.apache.org/foundation/policies/conduct.html>. If there's a discrepancy between the two, let us know or submit a PR to fix it.*
# Code of Conduct #
## Introduction ##
This code of conduct applies to all spaces managed by the Apache
Software Foundation, including IRC, all public and private mailing
lists, issue trackers, wikis, blogs, Twitter, and any other
communication channel used by our communities. A code of conduct which
is specific to in-person events (i.e., conferences) is codified in the
published ASF anti-harassment policy.
We expect this code of conduct to be honored by everyone who
participates in the Apache community formally or informally, or claims
any affiliation with the Foundation, in any Foundation-related
activities and especially when representing the ASF, in any role.
This code __is not exhaustive or complete__. It serves to distill our
common understanding of a collaborative, shared environment and goals.
We expect it to be followed in spirit as much as in the letter, so that
it can enrich all of us and the technical communities in which we participate.
## Specific Guidelines ##
We strive to:
1. __Be open.__ We invite anyone to participate in our community. We preferably use public methods of communication for project-related messages, unless discussing something sensitive. This applies to messages for help or project-related support, too; not only is a public support request much more likely to result in an answer to a question, it also makes sure that any inadvertent mistakes made by people answering will be more easily detected and corrected.
2. __Be empathetic, welcoming, friendly, and patient.__ We work together to resolve conflict, assume good intentions, and do our best to act in an empathetic fashion. We may all experience some frustration from time to time, but we do not allow frustration to turn into a personal attack. A community where people feel uncomfortable or threatened is not a productive one. We should be respectful when dealing with other community members as well as with people outside our community.
3. __Be collaborative.__ Our work will be used by other people, and in turn we will depend on the work of others. When we make something for the benefit of the project, we are willing to explain to others how it works, so that they can build on the work to make it even better. Any decision we make will affect users and colleagues, and we take those consequences seriously when making decisions.
4. __Be inquisitive.__ Nobody knows everything! Asking questions early avoids many problems later, so questions are encouraged, though they may be directed to the appropriate forum. Those who are asked should be responsive and helpful, within the context of our shared goal of improving Apache project code.
5. __Be careful in the words that we choose.__ Whether we are participating as professionals or volunteers, we value professionalism in all interactions, and take responsibility for our own speech. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behaviour are not acceptable. This includes, but is not limited to:
* Violent threats or language directed against another person.
* Sexist, racist, or otherwise discriminatory jokes and language.
* Posting sexually explicit or violent material.
* Posting (or threatening to post) other people's personally identifying information ("doxing").
* Sharing private content, such as emails sent privately or non-publicly, or unlogged forums such as IRC channel history.
* Personal insults, especially those using racist or sexist terms.
* Unwelcome sexual attention.
* Excessive or unnecessary profanity.
* Repeated harassment of others. In general, if someone asks you to stop, then stop.
* Advocating for, or encouraging, any of the above behaviour.
6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of people. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.
Try to bring new ideas to a conversation so that each mail adds something unique to the thread, keeping in mind that the rest of the thread still contains the other messages with arguments that have already been made.
Try to stay on topic, especially in discussions that are already fairly large.
7. __Step down considerately.__ Members of every project come and go. When somebody leaves or disengages from the project they should tell people they are leaving and take the proper steps to ensure that others can pick up where they left off. In doing so, they should remain respectful of those who continue to participate in the project and should not misrepresent the project's goals or achievements. Likewise, community members should respect any individual's choice to leave the project.
## Diversity Statement ##
Apache welcomes and encourages participation by everyone. We are committed to being a community that everyone feels good about joining. Although we may not be able to satisfy everyone, we will always work to treat everyone well.
No matter how you identify yourself or how others perceive you: we welcome you. Though no list can hope to be comprehensive, we explicitly honour diversity in: age, culture, ethnicity, genotype, gender identity or expression, language, national origin, neurotype, phenotype, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, subculture and technical ability.
Though we welcome people fluent in all languages, Apache development is conducted in English.
Standards for behaviour in the Apache community are detailed in the Code of Conduct above. We expect participants in our community to meet these standards in all their interactions and to help others to do so as well.
## Reporting Guidelines ##
While this code of conduct should be adhered to by participants, we recognize that sometimes people may have a bad day, or be unaware of some of the guidelines in this code of conduct. When that happens, you may reply to them and point out this code of conduct. Such messages may be in public or in private, whatever is most appropriate. However, regardless of whether the message is public or not, it should still adhere to the relevant parts of this code of conduct; in particular, it should not be abusive or disrespectful.
If you believe someone is violating this code of conduct, you may reply to
them and point out this code of conduct. Such messages may be in public or in
private, whatever is most appropriate. Assume good faith; it is more likely
that participants are unaware of their bad behaviour than that they
intentionally try to degrade the quality of the discussion. Should there be
difficulties in dealing with the situation, you may report your compliance
issues in confidence to either:
* President of the Apache Software Foundation: Sam Ruby (rubys at intertwingly dot net)
or one of our volunteers:
* [Mark Thomas](http://home.apache.org/~markt/coc.html)
* [Joan Touzet](http://home.apache.org/~wohali/)
* [Sharan Foga](http://home.apache.org/~sharan/coc.html)
If the violation is in documentation or code, for example inappropriate pronoun usage or word choice within official documentation, we ask that people report these privately to the project in question at private@<em>project</em>.apache.org, and, if they have sufficient ability within the project, to resolve or remove the concerning material, being mindful of the perspective of the person originally reporting the issue.
## End Notes ##
This Code defines __empathy__ as "a vicarious participation in the emotions, ideas, or opinions of others; the ability to imagine oneself in the condition or predicament of another." __Empathetic__ is the adjectival form of empathy.
This statement thanks the following, on which it draws for content and inspiration:
* [CouchDB Project Code of conduct](http://couchdb.apache.org/conduct.html)
* [Fedora Project Code of Conduct](http://fedoraproject.org/code-of-conduct)
* [Django Code of Conduct](https://www.djangoproject.com/conduct/)
* [Debian Code of Conduct](http://www.debian.org/vote/2014/vote_002)
* [Twitter Open Source Code of Conduct](https://github.com/twitter/code-of-conduct/blob/master/code-of-conduct.md)
* [Mozilla Code of Conduct/Draft](https://wiki.mozilla.org/Code_of_Conduct/Draft#Conflicts_of_Interest)
* [Python Diversity Appendix](https://www.python.org/community/diversity/)
* [Python Mentors Home Page](http://pythonmentors.com/)


@@ -0,0 +1,440 @@
---
title: APISIX Lua Coding Style Guide
---
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
## Indentation
Use 4 spaces as an indent:
```lua
--No
if a then
ngx.say("hello")
end
```
```lua
--Yes
if a then
    ngx.say("hello")
end
```
You can simplify this by configuring your editor to insert 4 spaces when you press the Tab key.
## Space
Use a space on both sides of an operator to separate it from its operands:
```lua
--No
local i=1
local s = "apisix"
```
```lua
--Yes
local i = 1
local s = "apisix"
```
## Blank line
Many developers will add a semicolon at the end of the line:
```lua
--No
if a then
    ngx.say("hello");
end;
```
Semicolons are unnecessary in Lua and only make the code look cluttered. Likewise, don't try to save lines by collapsing multi-line code into a single line to appear "simple"; when an error occurs in such a line, you cannot tell which part of it the error message points to:
```lua
--No
if a then ngx.say("hello") end
```
```lua
--Yes
if a then
    ngx.say("hello")
end
```
Functions need to be separated by two blank lines:
```lua
--No
local function foo()
end
local function bar()
end
```
```lua
--Yes
local function foo()
end


local function bar()
end
```
If there are multiple if elseif branches, they need a blank line to separate them:
```lua
--No
if a == 1 then
    foo()
elseif a== 2 then
    bar()
elseif a == 3 then
    run()
else
    error()
end
```
```lua
--Yes
if a == 1 then
    foo()

elseif a == 2 then
    bar()

elseif a == 3 then
    run()

else
    error()
end
```
## Maximum length per line
Each line cannot exceed 100 characters. If it exceeds, you need to wrap and align:
```lua
--No
return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, conf.default_conn_delay)
```
```lua
--Yes
return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst,
                      conf.default_conn_delay)
```
When wrapping a line, the alignment should make the relationship between the two lines clear. In the example above, the arguments on the second line sit to the right of the opening parenthesis on the first line.
If the wrap happens in a string concatenation, put the `..` operator at the beginning of the next line:
```lua
--No
return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" ..
"plugin-limit-conn")
```
```lua
--Yes
return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn"
.. "plugin-limit-conn")
```
```lua
--Yes
return "param1", "plugin-limit-conn"
.. "plugin-limit-conn"
```
## Variable
Local variables should always be used, not global variables:
```lua
--No
i = 1
s = "apisix"
```
```lua
--Yes
local i = 1
local s = "apisix"
```
Variable naming uses the `snake_case` style:
```lua
--No
local IndexArr = 1
local str_Name = "apisix"
```
```lua
--Yes
local index_arr = 1
local str_name = "apisix"
```
Use all-uppercase names for constants:
```lua
--No
local max_int = 65535
local server_name = "apisix"
```
```lua
--Yes
local MAX_INT = 65535
local SERVER_NAME = "apisix"
```
## Table
Use `table.new` to pre-allocate the table:
```lua
--No
local t = {}
for i = 1, 100 do
    t[i] = i
end
```
```lua
--Yes
local new_tab = require "table.new"
local t = new_tab(100, 0)
for i = 1, 100 do
    t[i] = i
end
```
Don't use `nil` in an array:
```lua
--No
local t = {1, 2, nil, 3}
```
If you must use null values, use `ngx.null` to represent them:
```lua
--Yes
local t = {1, 2, ngx.null, 3}
```
## String
Do not concatenate strings on the hot code path:
```lua
--No
local s = ""
for i = 1, 100000 do
    s = s .. "a"
end
```
```lua
--Yes
local new_tab = require "table.new"
local t = new_tab(100000, 0)
for i = 1, 100000 do
t[i] = "a"
end
local s = table.concat(t, "")
```
## Function
The naming of functions also follows `snake_case`:
```lua
--No
local function testNginx()
end
```
```lua
--Yes
local function test_nginx()
end
```
The function should return as early as possible:
```lua
--No
local function check(age, name)
    local ret = true
    if age < 20 then
        ret = false
    end
    if name == "a" then
        ret = false
    end
    -- do something else
    return ret
end
```
```lua
--Yes
local function check(age, name)
    if age < 20 then
        return false
    end
    if name == "a" then
        return false
    end
    -- do something else
    return true
end
```
A function should return `<boolean>, err`.
The first return value indicates success or failure; on failure, the second return value carries the error message.
The error message can be ignored in some cases.
```lua
--No
local function check()
return "failed"
end
```
```lua
--Yes
local function check()
    return false, "failed"
end
```
## Module
All require libraries must be localized:
```lua
--No
local function foo()
    local ok, err = ngx.timer.at(delay, handler)
end
```
```lua
--Yes
local timer_at = ngx.timer.at
local function foo()
    local ok, err = timer_at(delay, handler)
end
```
For style unification, `require` and `ngx` also need to be localized:
```lua
--No
local core = require("apisix.core")
local timer_at = ngx.timer.at
local function foo()
    local ok, err = timer_at(delay, handler)
end
```
```lua
--Yes
local ngx = ngx
local require = require
local core = require("apisix.core")
local timer_at = ngx.timer.at
local function foo()
    local ok, err = timer_at(delay, handler)
end
```
## Error handling
For functions that return error information, the error must be checked and handled:
```lua
--No
local sock = ngx.socket.tcp()
local ok = sock:connect("www.google.com", 80)
ngx.say("successfully connected to google!")
```
```lua
--Yes
local sock = ngx.socket.tcp()
local ok, err = sock:connect("www.google.com", 80)
if not ok then
ngx.say("failed to connect to google: ", err)
return
end
ngx.say("successfully connected to google!")
```
For functions you write yourself, return the error message as the second return value, in the form of a string:
```lua
--No
local function foo()
    local ok, err = func()
    if not ok then
        return false
    end
    return true
end
```
```lua
--No
local function foo()
    local ok, err = func()
    if not ok then
        return false, {msg = err}
    end
    return true
end
```
```lua
--Yes
local function foo()
    local ok, err = func()
    if not ok then
        return false, "failed to call func(): " .. err
    end
    return true
end
```


@@ -0,0 +1,152 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
# Contributing to APISIX
Firstly, thanks for your interest in contributing! I hope that this will be a pleasant first experience for you, and that you will return to continue
contributing.
## How to contribute?
Most of the contributions that we receive are code contributions, but you can also contribute to the documentation or simply report solid bugs for us to fix. Code is not the only way to contribute to the project: we strongly value documentation and integration with other projects, and gladly accept improvements for these aspects.
For new contributors, please take a look at issues with a tag called [Good first issue](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or [Help wanted](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
## How to report a bug?
* **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/apache/apisix/issues).
* If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/apache/apisix/issues/new). Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
## How to add a new feature or change an existing one
_Before making any significant changes, please [open an issue](https://github.com/apache/apisix/issues)._ Discussing your proposed changes ahead of time will make the contribution process smooth for everyone.
Once we've discussed your changes and you've got your code ready, make sure that tests are passing and open your pull request. Your PR is most likely to be accepted if it:
* Updates the README.md with details of changes to the interface.
* Includes tests for new functionality.
* References the original issue in the description, e.g. "Resolves #123".
* Has a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
* Has a title that starts with one of the words in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/workflows/semantic.yml).
* Follows the [PR manners](https://raw.githubusercontent.com/apache/apisix/master/.github/PULL_REQUEST_TEMPLATE.md).
## Contribution Guidelines for Documentation
* Linting/Style (a local usage sketch for these tools follows this list)
For linting both our Markdown and YAML files we use:
- npm based [markdownlint-cli](https://www.npmjs.com/package/markdownlint-cli)
For linting all files' license header we use:
- [license-eye](https://github.com/apache/skywalking-eyes)
For linting our shell files we use:
- [shellcheck](https://github.com/koalaman/shellcheck)
For linting our zh document files we use:
- [autocorrect](https://github.com/huacnlee/autocorrect)
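These tools can also be run locally before opening a PR. A minimal sketch, assuming npm, Docker, shellcheck, and autocorrect are available; the exact versions, image, and paths here are assumptions rather than project requirements:
```shell
# Markdown style (markdownlint-cli)
npm install -g markdownlint-cli
markdownlint '**/*.md'

# License headers (license-eye, from skywalking-eyes)
docker run -it --rm -v "$(pwd)":/github/workspace apache/skywalking-eyes header check

# Shell scripts
shellcheck utils/*.sh

# Chinese docs
autocorrect --lint docs/zh/
```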
* Active Voice
In general, use active voice when formulating the sentence instead of passive voice. A sentence written in the active voice emphasizes
the person or thing performing the action (e.g. "The dog chased the ball"). In contrast, the passive voice highlights
the recipient of the action ("The ball was chased by the dog"). Therefore, use the passive voice only when it's less important
who or what completed the action and more important that the action was completed. For example:
- Recommended: The key-auth plugin authenticates the requests.
- Not recommended: The requests are authenticated by the key-auth plugin.
* Capitalization:
* For titles of a section, capitalize the first letter of each word except for the [closed-class words](https://en.wikipedia.org/wiki/Part_of_speech#Open_and_closed_classes)
such as determiners, pronouns, conjunctions, and prepositions. Use the following [link](https://capitalizemytitle.com/#Chicago) for guidance.
- Recommended: Authentication **with** APISIX
* For normal sentences, don't [capitalize](https://www.grammarly.com/blog/capitalization-rules/) random words in the middle of the sentences.
Use the Chicago manual for capitalization rules for the documentation.
* Second Person
In general, use second person in your docs rather than first person. For example:
- Recommended: You are recommended to use the docker based deployment.
- Not Recommended: We recommend to use the docker based deployment.
* Spellings
Use [American spellings](https://www.oxfordinternationalenglish.com/differences-in-british-and-american-spelling/) when
contributing to the documentation.
* Voice
* Use a friendly and conversational tone. Always use simple sentences. If a sentence is lengthy, try to break it into smaller sentences.
## Check code style and test case style
* code style
* Please take a look at [APISIX Lua Coding Style Guide](CODE_STYLE.md).
* Use the command `make lint` to check your code statically.
```shell
# install `luacheck` first before run it
$ luarocks install luacheck
# check source code
$ make lint
./utils/check-lua-code-style.sh
+ luacheck -q apisix t/lib
Total: 0 warnings / 0 errors in 146 files
+ find apisix -name *.lua ! -wholename apisix/cli/ngx_tpl.lua -exec ./utils/lj-releng {} +
+ grep -E ERROR.*.lua: /tmp/check.log
+ true
+ [ -s /tmp/error.log ]
./utils/check-test-code-style.sh
+ find t -name '*.t' -exec grep -E '\-\-\-\s+(SKIP|ONLY|LAST|FIRST)$' '{}' +
+ true
+ '[' -s /tmp/error.log ']'
+ find t -name '*.t' -exec ./utils/reindex '{}' +
+ grep done. /tmp/check.log
+ true
+ '[' -s /tmp/error.log ']'
```
The `lj-releng` and `reindex` tools will be downloaded automatically by `make lint` if they do not exist.
* test case style
* Use the same command to check your test case style statically: `make lint`.
* When a test file is too large, for example > 800 lines, you should split it into a new file.
Please take a look at `t/plugin/limit-conn.t` and `t/plugin/limit-conn2.t`.
* For more details, see the [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md)
## Contributor gifts
If you have contributed to Apache APISIX, whether it was a code contribution to fix a bug, a feature request, or a documentation change, congratulations! You are eligible to receive an APISIX special gift with a digital certificate! It has always been the community effort that has made Apache APISIX understood and used by more developers.
![Contributor gifts](https://static.apiseven.com/2022/12/29/63acfb2f208e1.png)
Contributors can request gifts by filling out this [Google form](https://forms.gle/DhPL96LnJwuaHjHU7) or [QQ Form](https://wj.qq.com/s2/11438041/7b07/). After filling in the form, please wait patiently. The community needs some time to review submissions.
## Do you have questions about the source code?
- **QQ Group**: 781365357(recommended), 578997126, 552030619
- Join in `apisix` channel at [Apache Slack](http://s.apache.org/slack-invite). If the link is not working, find the latest one at [Apache INFRA WIKI](https://cwiki.apache.org/confluence/display/INFRA/Slack+Guest+Invites).


@@ -0,0 +1,219 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache APISIX Subcomponents:
The Apache APISIX project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is the standard Apache 2.0 license.
ewma.lua file from kubernetes/ingress-nginx: https://github.com/kubernetes/ingress-nginx Apache 2.0
hello.go file from OpenFunction/samples: https://github.com/OpenFunction/samples Apache 2.0


@@ -0,0 +1,62 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
## Release steps
### Release patch version
1. Create a [pull request](https://github.com/apache/apisix/commit/7db31a1a7186b966bc0f066539d4de8011871012) (contains the changelog and version change) to master
> The changelog only needs to provide a link to the minor branch.
2. Create a [pull request](https://github.com/apache/apisix/commit/21d7673c6e8ff995677456cdebc8ded5afbb3d0a) (contains the backport commits, and the change in step 1) to minor branch
> This should include the PRs that carry the `need backport` tag since the last patch release. Also, the titles of these PRs need to be added to the changelog of the minor branch.
3. Merge it into minor branch
4. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` (a command sketch follows this list)
5. Send the [vote email](https://lists.apache.org/thread/vq4qtwqro5zowpdqhx51oznbjy87w9d0) to dev@apisix.apache.org
> After executing the `VERSION=x.y.z make release-src` command, the content of the vote email is automatically generated in the `./release` directory, in a file named `apache-apisix-${x.y.z}-vote-contents`
6. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/k2frnvj4zj9oynsbr7h7nd6n6m3q5p89) to dev@apisix.apache.org
7. Move the vote artifact to Apache's apisix repo
8. Register the release info in https://reporter.apache.org/addrelease.html?apisix
9. Create a [GitHub release](https://github.com/apache/apisix/releases/tag/2.10.2) from the minor branch
10. Update [APISIX's website](https://github.com/apache/apisix-website/commit/f9104bdca50015722ab6e3714bbcd2d17e5c5bb3) if the version number is the largest
11. Update APISIX rpm package
> Go to the [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the rpm package to the yum repo
12. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in the [APISIX docker repository](https://github.com/apache/apisix-docker); after the PR is merged, create a new branch from master named `release/apisix-${version}`, e.g. `release/apisix-2.10.2`.
- If an LTS version is released and its version number is less than the current largest (e.g. the current largest version is 2.14.1, but LTS version 2.13.2 is to be released), submit a PR like [this one](https://github.com/apache/apisix-docker/pull/322) in the [APISIX docker repository](https://github.com/apache/apisix-docker) with a branch named `release/apisix-${version}`, e.g. `release/apisix-2.13.2`. After the PR is reviewed, there is no need to merge it; just close the PR and push the branch to the APISIX docker repository.
13. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) if the version number is the largest
14. Send the [ANNOUNCE email](https://lists.apache.org/thread.html/ree7b06e6eac854fd42ba4f302079661a172f514a92aca2ef2f1aa7bb%40%3Cdev.apisix.apache.org%3E) to dev@apisix.apache.org & announce@apache.org
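For reference, here is a minimal sketch of steps 4-5 (the version number is illustrative; the file names follow the `make release-src` output described above):

```bash
# Build, sign, and checksum the source artifact (step 4); 3.9.1 is illustrative.
VERSION=3.9.1 make release-src
ls release/
# apache-apisix-3.9.1-src.tgz         apache-apisix-3.9.1-src.tgz.asc
# apache-apisix-3.9.1-src.tgz.sha512  apache-apisix-3.9.1-vote-contents
# The vote-contents file is the body of the vote email (step 5):
cat release/apache-apisix-3.9.1-vote-contents
```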
### Release minor version
1. Create a minor branch, and create a [pull request](https://github.com/apache/apisix/commit/bc6ddf51f15e41fffea6c5bd7d01da9838142b66) to the master branch from it
2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src`
3. Send the [vote email](https://lists.apache.org/thread/q8zq276o20r5r9qjkg074nfzb77xwry9) to dev@apisix.apache.org
> After executing the `VERSION=x.y.z make release-src` command, the content of the vote email is automatically generated in the `./release` directory, in a file named `apache-apisix-${x.y.z}-vote-contents`
4. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/p1m9s116rojlhb91g38cj8646393qkz7) to dev@apisix.apache.org
5. Move the vote artifact to Apache's apisix repo
6. Register the release info in https://reporter.apache.org/addrelease.html?apisix
7. Create a [GitHub release](https://github.com/apache/apisix/releases/tag/2.10.0) from the minor branch
8. Merge the pull request into master branch
9. Update [APISIX's website](https://github.com/apache/apisix-website/commit/7bf0ab5a1bbd795e6571c4bb89a6e646115e7ca3)
10. Update APISIX rpm package.
> Go to the [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the rpm package to the yum repo
11. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in the [APISIX docker repository](https://github.com/apache/apisix-docker); after the PR is merged, create a new branch from master named `release/apisix-${version}`, e.g. `release/apisix-2.10.2`.
- If an LTS version is released and its version number is less than the current largest (e.g. the current largest version is 2.14.1, but LTS version 2.13.2 is to be released), submit a PR like [this one](https://github.com/apache/apisix-docker/pull/322) in the [APISIX docker repository](https://github.com/apache/apisix-docker) with a branch named `release/apisix-${version}`, e.g. `release/apisix-2.13.2`. After the PR is reviewed, there is no need to merge it; just close the PR and push the branch to the APISIX docker repository.
12. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234)
13. Send the [ANNOUNCE email](https://lists.apache.org/thread/4s4msqwl1tq13p9dnv3hx7skbgpkozw1) to dev@apisix.apache.org & announce@apache.org

View File

@@ -0,0 +1,523 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Makefile basic env setting
.DEFAULT_GOAL := help
# add pipefail support for default shell
SHELL := /bin/bash -o pipefail
# Project basic setting
VERSION ?= master
project_name ?= apache-apisix
project_release_name ?= $(project_name)-$(VERSION)-src
OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json
# Hyperconverged Infrastructure
ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]')
ENV_OS_ARCH ?= $(shell uname -m | tr '[:upper:]' '[:lower:]')
ENV_APISIX ?= $(CURDIR)/bin/apisix
ENV_GIT ?= git
ENV_TAR ?= tar
ENV_INSTALL ?= install
ENV_RM ?= rm -vf
ENV_DOCKER ?= docker
ENV_DOCKER_COMPOSE ?= docker compose --project-directory $(CURDIR) -p $(project_name) -f $(project_compose_ci)
ENV_NGINX ?= $(ENV_NGINX_EXEC) -p $(CURDIR) -c $(CURDIR)/conf/nginx.conf
ENV_NGINX_EXEC := $(shell command -v openresty 2>/dev/null || command -v nginx 2>/dev/null)
ENV_OPENSSL_PREFIX ?= /usr/local/openresty/openssl3
ENV_LIBYAML_INSTALL_PREFIX ?= /usr
ENV_LUAROCKS ?= luarocks
## These variables can be injected by luarocks
ENV_INST_PREFIX ?= /usr
ENV_INST_LUADIR ?= $(ENV_INST_PREFIX)/share/lua/5.1
ENV_INST_BINDIR ?= $(ENV_INST_PREFIX)/bin
ENV_RUNTIME_VER ?= $(shell $(ENV_NGINX_EXEC) -V 2>&1 | tr ' ' '\n' | grep 'APISIX_RUNTIME_VER' | cut -d '=' -f2)
IMAGE_NAME = apache/apisix
ENV_APISIX_IMAGE_TAG_NAME ?= $(IMAGE_NAME):$(VERSION)
-include .requirements
export
ifneq ($(shell whoami), root)
ENV_LUAROCKS_FLAG_LOCAL := --local
endif
ifdef ENV_LUAROCKS_SERVER
ENV_LUAROCKS_SERVER_OPT := --server $(ENV_LUAROCKS_SERVER)
endif
ifneq ($(shell test -d $(ENV_OPENSSL_PREFIX) && echo -n yes), yes)
ENV_NGINX_PREFIX := $(shell $(ENV_NGINX_EXEC) -V 2>&1 | grep -Eo 'prefix=(.*)/nginx\s+' | grep -Eo '/.*/')
ifeq ($(shell test -d $(addprefix $(ENV_NGINX_PREFIX), openssl3) && echo -n yes), yes)
ENV_OPENSSL_PREFIX := $(addprefix $(ENV_NGINX_PREFIX), openssl3)
endif
endif
# Makefile basic extension function
_color_red =\E[1;31m
_color_green =\E[1;32m
_color_yellow =\E[1;33m
_color_blue =\E[1;34m
_color_wipe =\E[0m
define func_echo_status
printf "[%b info %b] %s\n" "$(_color_blue)" "$(_color_wipe)" $(1)
endef
define func_echo_warn_status
printf "[%b info %b] %s\n" "$(_color_yellow)" "$(_color_wipe)" $(1)
endef
define func_echo_success_status
printf "[%b info %b] %s\n" "$(_color_green)" "$(_color_wipe)" $(1)
endef
define func_check_folder
if [[ ! -d $(1) ]]; then \
mkdir -p $(1); \
$(call func_echo_status, 'folder check -> create `$(1)`'); \
else \
$(call func_echo_success_status, 'folder check -> found `$(1)`'); \
fi
endef
# Makefile target
.PHONY: runtime
runtime:
ifeq ($(ENV_NGINX_EXEC), )
ifeq ("$(wildcard /usr/local/openresty/bin/openresty)", "")
@$(call func_echo_warn_status, "WARNING: OpenResty not found. You have to install OpenResty and add its binary to PATH before installing Apache APISIX.")
exit 1
else
$(eval ENV_NGINX_EXEC := /usr/local/openresty/bin/openresty)
@$(call func_echo_status, "Use openresty as default runtime")
endif
endif
### help : Show Makefile rules
### If there are awk failures, please make sure
### you are using awk or gawk
.PHONY: help
help:
@$(call func_echo_success_status, "Makefile rules:")
@awk '{ if(match($$0, /^\s*#{3}\s*([^:]+)\s*:\s*(.*)$$/, res)){ printf(" make %-15s : %-10s\n", res[1], res[2]) } }' Makefile
### deps : Installing dependencies
.PHONY: deps
deps: install-runtime
$(eval ENV_LUAROCKS_VER := $(shell $(ENV_LUAROCKS) --version | grep -E -o "luarocks [0-9]+."))
@if [ '$(ENV_LUAROCKS_VER)' = 'luarocks 3.' ]; then \
mkdir -p ~/.luarocks; \
$(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_LIBDIR $(addprefix $(ENV_OPENSSL_PREFIX), /lib); \
$(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_INCDIR $(addprefix $(ENV_OPENSSL_PREFIX), /include); \
$(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.YAML_DIR $(ENV_LIBYAML_INSTALL_PREFIX); \
$(ENV_LUAROCKS) install apisix-master-0.rockspec --tree deps --only-deps $(ENV_LUAROCKS_SERVER_OPT); \
else \
$(call func_echo_warn_status, "WARNING: You're not using LuaRocks 3.x; please remove LuaRocks and reinstall it via https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh"); \
exit 1; \
fi
### undeps : Uninstalling dependencies
.PHONY: undeps
undeps: uninstall-rocks uninstall-runtime
.PHONY: uninstall-rocks
uninstall-rocks:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_LUAROCKS) purge --tree=deps
@$(call func_echo_success_status, "$@ -> [ Done ]")
### utils : Installation tools
.PHONY: utils
utils:
ifeq ("$(wildcard utils/lj-releng)", "")
wget -qP utils https://raw.githubusercontent.com/iresty/openresty-devel-utils/master/lj-releng
chmod a+x utils/lj-releng
endif
ifeq ("$(wildcard utils/reindex)", "")
wget -qP utils https://raw.githubusercontent.com/iresty/openresty-devel-utils/master/reindex
chmod a+x utils/reindex
endif
### lint : Lint source code
.PHONY: lint
lint: utils
@$(call func_echo_status, "$@ -> [ Start ]")
./utils/check-lua-code-style.sh
./utils/check-test-code-style.sh
@$(call func_echo_success_status, "$@ -> [ Done ]")
### init : Initialize the runtime environment
.PHONY: init
init: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_APISIX) init
$(ENV_APISIX) init_etcd
@$(call func_echo_success_status, "$@ -> [ Done ]")
### run : Start the apisix server
.PHONY: run
run: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_APISIX) start
@$(call func_echo_success_status, "$@ -> [ Done ]")
### quit : Stop the apisix server, exit gracefully
.PHONY: quit
quit: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_APISIX) quit
@$(call func_echo_success_status, "$@ -> [ Done ]")
### stop : Stop the apisix server, exit immediately
.PHONY: stop
stop: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_APISIX) stop
@$(call func_echo_success_status, "$@ -> [ Done ]")
### verify : Verify the configuration of apisix server
.PHONY: verify
verify: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_NGINX) -t
@$(call func_echo_success_status, "$@ -> [ Done ]")
### clean : Remove generated files
.PHONY: clean
clean:
@$(call func_echo_status, "$@ -> [ Start ]")
rm -rf logs/
@$(call func_echo_success_status, "$@ -> [ Done ]")
### reload : Reload the apisix server
.PHONY: reload
reload: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_APISIX) reload
@$(call func_echo_success_status, "$@ -> [ Done ]")
.PHONY: install-runtime
install-runtime:
ifneq ($(ENV_RUNTIME_VER), $(APISIX_RUNTIME))
./utils/install-dependencies.sh
@sudo $(ENV_INSTALL) /usr/local/openresty/bin/openresty $(ENV_INST_BINDIR)/openresty
endif
.PHONY: uninstall-runtime
uninstall-runtime:
./utils/install-dependencies.sh uninstall
rm -rf /usr/local/openresty
rm -f $(ENV_INST_BINDIR)/openresty
### install : Install the apisix (only for luarocks)
.PHONY: install
install: runtime
$(ENV_INSTALL) -d /usr/local/apisix/
$(ENV_INSTALL) -d /usr/local/apisix/logs/
$(ENV_INSTALL) -d /usr/local/apisix/conf/cert
$(ENV_INSTALL) conf/mime.types /usr/local/apisix/conf/mime.types
$(ENV_INSTALL) conf/config.yaml /usr/local/apisix/conf/config.yaml
$(ENV_INSTALL) conf/debug.yaml /usr/local/apisix/conf/debug.yaml
$(ENV_INSTALL) conf/cert/* /usr/local/apisix/conf/cert/
# directories listed in alphabetical order
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix
$(ENV_INSTALL) apisix/*.lua $(ENV_INST_LUADIR)/apisix/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/admin
$(ENV_INSTALL) apisix/admin/*.lua $(ENV_INST_LUADIR)/apisix/admin/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/balancer
$(ENV_INSTALL) apisix/balancer/*.lua $(ENV_INST_LUADIR)/apisix/balancer/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli
$(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/control
$(ENV_INSTALL) apisix/control/*.lua $(ENV_INST_LUADIR)/apisix/control/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core
$(ENV_INSTALL) apisix/core/*.lua $(ENV_INST_LUADIR)/apisix/core/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core/dns
$(ENV_INSTALL) apisix/core/dns/*.lua $(ENV_INST_LUADIR)/apisix/core/dns
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery
$(ENV_INSTALL) apisix/discovery/*.lua $(ENV_INST_LUADIR)/apisix/discovery/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery/{consul,consul_kv,dns,eureka,nacos,kubernetes,tars}
$(ENV_INSTALL) apisix/discovery/consul/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul
$(ENV_INSTALL) apisix/discovery/consul_kv/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul_kv
$(ENV_INSTALL) apisix/discovery/dns/*.lua $(ENV_INST_LUADIR)/apisix/discovery/dns
$(ENV_INSTALL) apisix/discovery/eureka/*.lua $(ENV_INST_LUADIR)/apisix/discovery/eureka
$(ENV_INSTALL) apisix/discovery/kubernetes/*.lua $(ENV_INST_LUADIR)/apisix/discovery/kubernetes
$(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos
$(ENV_INSTALL) apisix/discovery/tars/*.lua $(ENV_INST_LUADIR)/apisix/discovery/tars
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http
$(ENV_INSTALL) apisix/http/*.lua $(ENV_INST_LUADIR)/apisix/http/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http/router
$(ENV_INSTALL) apisix/http/router/*.lua $(ENV_INST_LUADIR)/apisix/http/router/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model
$(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/inspect
$(ENV_INSTALL) apisix/inspect/*.lua $(ENV_INST_LUADIR)/apisix/inspect/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins
$(ENV_INSTALL) apisix/plugins/*.lua $(ENV_INST_LUADIR)/apisix/plugins/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin
$(ENV_INSTALL) apisix/plugins/ext-plugin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode
$(ENV_INSTALL) apisix/plugins/grpc-transcode/*.lua $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ip-restriction
$(ENV_INSTALL) apisix/plugins/ip-restriction/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ip-restriction/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-conn
$(ENV_INSTALL) apisix/plugins/limit-conn/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-conn/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-req
$(ENV_INSTALL) apisix/plugins/limit-req/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-req/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-count
$(ENV_INSTALL) apisix/plugins/limit-count/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-count/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/opa
$(ENV_INSTALL) apisix/plugins/opa/*.lua $(ENV_INST_LUADIR)/apisix/plugins/opa/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/prometheus
$(ENV_INSTALL) apisix/plugins/prometheus/*.lua $(ENV_INST_LUADIR)/apisix/plugins/prometheus/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/proxy-cache
$(ENV_INSTALL) apisix/plugins/proxy-cache/*.lua $(ENV_INST_LUADIR)/apisix/plugins/proxy-cache/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/serverless
$(ENV_INSTALL) apisix/plugins/serverless/*.lua $(ENV_INST_LUADIR)/apisix/plugins/serverless/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog
$(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls
$(ENV_INSTALL) apisix/plugins/tencent-cloud-cls/*.lua $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub
$(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/secret
$(ENV_INSTALL) apisix/secret/*.lua $(ENV_INST_LUADIR)/apisix/secret/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/zipkin
$(ENV_INSTALL) apisix/plugins/zipkin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/zipkin/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/ssl/router
$(ENV_INSTALL) apisix/ssl/router/*.lua $(ENV_INST_LUADIR)/apisix/ssl/router/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream
$(ENV_INSTALL) apisix/stream/*.lua $(ENV_INST_LUADIR)/apisix/stream/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/plugins
$(ENV_INSTALL) apisix/stream/plugins/*.lua $(ENV_INST_LUADIR)/apisix/stream/plugins/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/router
$(ENV_INSTALL) apisix/stream/router/*.lua $(ENV_INST_LUADIR)/apisix/stream/router/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc
$(ENV_INSTALL) apisix/stream/xrpc/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis
$(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo
$(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils
$(ENV_INSTALL) apisix/utils/*.lua $(ENV_INST_LUADIR)/apisix/utils/
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy
$(ENV_INSTALL) apisix/plugins/ai-proxy/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-drivers
$(ENV_INSTALL) apisix/plugins/ai-drivers/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-drivers
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings
$(ENV_INSTALL) apisix/plugins/ai-rag/embeddings/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search
$(ENV_INSTALL) apisix/plugins/ai-rag/vector-search/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/mcp/broker
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/mcp/transport
$(ENV_INSTALL) apisix/plugins/mcp/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp
$(ENV_INSTALL) apisix/plugins/mcp/broker/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp/broker
$(ENV_INSTALL) apisix/plugins/mcp/transport/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp/transport
$(ENV_INSTALL) bin/apisix $(ENV_INST_BINDIR)/apisix
### uninstall : Uninstall the apisix
.PHONY: uninstall
uninstall:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_RM) -r /usr/local/apisix
$(ENV_RM) -r $(ENV_INST_LUADIR)/apisix
$(ENV_RM) $(ENV_INST_BINDIR)/apisix
@$(call func_echo_success_status, "$@ -> [ Done ]")
### test : Run the test case
.PHONY: test
test: runtime
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_GIT) submodule update --init --recursive
prove -I../test-nginx/lib -I./ -r -s t/
@$(call func_echo_success_status, "$@ -> [ Done ]")
### license-check : Check project source code for Apache License
.PHONY: license-check
license-check:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER) run -it --rm -v $(CURDIR):/github/workspace apache/skywalking-eyes header check
@$(call func_echo_success_status, "$@ -> [ Done ]")
.PHONY: release-src
release-src: compress-tar
@$(call func_echo_status, "$@ -> [ Start ]")
gpg --batch --yes --armor --detach-sig $(project_release_name).tgz
shasum -a 512 $(project_release_name).tgz > $(project_release_name).tgz.sha512
$(call func_check_folder,release)
mv $(project_release_name).tgz release/$(project_release_name).tgz
mv $(project_release_name).tgz.asc release/$(project_release_name).tgz.asc
mv $(project_release_name).tgz.sha512 release/$(project_release_name).tgz.sha512
./utils/gen-vote-contents.sh $(VERSION)
@$(call func_echo_success_status, "$@ -> [ Done ]")
.PHONY: compress-tar
compress-tar:
# The $VERSION can be major.minor.patch (from developer)
# or major.minor (from the branch name in the CI)
$(ENV_TAR) -zcvf $(project_release_name).tgz \
./apisix \
./bin \
./conf \
./apisix-master-0.rockspec \
LICENSE \
Makefile \
NOTICE \
*.md
### container
### ci-env-up : CI env launch
.PHONY: ci-env-up
ci-env-up:
@$(call func_echo_status, "$@ -> [ Start ]")
touch $(OTEL_CONFIG)
chmod 777 $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) up -d
@$(call func_echo_success_status, "$@ -> [ Done ]")
### ci-env-ps : CI env ps
.PHONY: ci-env-ps
ci-env-ps:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER_COMPOSE) ps
@$(call func_echo_success_status, "$@ -> [ Done ]")
### ci-env-rebuild : CI env image rebuild
.PHONY: ci-env-rebuild
ci-env-rebuild:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER_COMPOSE) build
@$(call func_echo_success_status, "$@ -> [ Done ]")
### ci-env-down : CI env destroy
.PHONY: ci-env-down
ci-env-down:
@$(call func_echo_status, "$@ -> [ Start ]")
rm $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) down
@$(call func_echo_success_status, "$@ -> [ Done ]")
### ci-env-stop : CI env temporary stop
.PHONY: ci-env-stop
ci-env-stop:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER_COMPOSE) stop
@$(call func_echo_success_status, "$@ -> [ Done ]")
### build-on-debian-dev : Build apache/apisix:xx-debian-dev image
.PHONY: build-on-debian-dev
build-on-debian-dev:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER) build -t $(ENV_APISIX_IMAGE_TAG_NAME)-debian-dev \
--build-arg TARGETARCH=$(ENV_OS_ARCH) \
--build-arg CODE_PATH=. \
--build-arg ENTRYPOINT_PATH=./docker/debian-dev/docker-entrypoint.sh \
--build-arg INSTALL_BROTLI=./docker/debian-dev/install-brotli.sh \
--build-arg CHECK_STANDALONE_CONFIG=./docker/utils/check_standalone_config.sh \
-f ./docker/debian-dev/Dockerfile .
@$(call func_echo_success_status, "$@ -> [ Done ]")
.PHONY: push-on-debian-dev
push-on-debian-dev:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER) tag $(ENV_APISIX_IMAGE_TAG_NAME)-debian-dev $(IMAGE_NAME):dev-$(ENV_OS_ARCH)
$(ENV_DOCKER) push $(IMAGE_NAME):dev-$(ENV_OS_ARCH)
@$(call func_echo_success_status, "$@ -> [ Done ]")
### merge-dev-tags : Merge architecture-specific dev tags into a single dev tag
.PHONY: merge-dev-tags
merge-dev-tags:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_DOCKER) manifest create $(IMAGE_NAME):dev \
$(IMAGE_NAME):dev-amd64 \
$(IMAGE_NAME):dev-arm64
$(ENV_DOCKER) manifest push $(IMAGE_NAME):dev
@$(call func_echo_success_status, "$@ -> [ Done ]")
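# ---------------------------------------------------------------------
# Usage note (added for clarity; not part of the original targets).
# A typical local workflow with the targets above, assuming OpenResty and
# LuaRocks 3.x are already on PATH:
#   make deps     # install Lua dependencies into ./deps
#   make init     # generate nginx.conf and initialize etcd
#   make run      # start the apisix server
#   make verify   # check the generated nginx config (nginx -t)
#   make quit     # stop the server gracefully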

View File

@@ -0,0 +1,5 @@
Apache APISIX
Copyright 2019-2025 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@@ -0,0 +1,241 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
# Apache APISIX API Gateway | AI Gateway
<img src="./logos/apisix-white-bg.jpg" alt="APISIX logo" height="150px" align="right" />
[![Build Status](https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg?branch=master)](https://github.com/apache/apisix/actions/workflows/build.yml)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/apache/apisix/blob/master/LICENSE)
[![Commit activity](https://img.shields.io/github/commit-activity/m/apache/apisix)](https://github.com/apache/apisix/graphs/commit-activity)
[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Average time to resolve an issue")
[![Percentage of issues still open](http://isitmaintained.com/badge/open/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Percentage of issues still open")
[![Slack](https://badgen.net/badge/Slack/Join%20Apache%20APISIX?icon=slack)](https://apisix.apache.org/slack)
**Apache APISIX** is a dynamic, real-time, high-performance API Gateway.
APISIX API Gateway provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more.
APISIX can serve as an **[AI Gateway](https://apisix.apache.org/ai-gateway/)** through its flexible plugin system, providing AI proxying, load balancing for LLMs, retries and fallbacks, token-based rate limiting, and robust security to ensure the efficiency and reliability of AI agents. APISIX also provides the [`mcp-bridge`](https://apisix.apache.org/blog/2025/04/21/host-mcp-server-with-api-gateway/) plugin to seamlessly convert stdio-based MCP servers to scalable HTTP SSE services.
You can use APISIX API Gateway to handle traditional north-south traffic, as well as east-west traffic between services. It can also be used as a [k8s ingress controller](https://github.com/apache/apisix-ingress-controller).
The technical architecture of Apache APISIX:
![Technical architecture of Apache APISIX](docs/assets/images/apisix.png)
## Community
- [Kindly Write a Review](https://www.g2.com/products/apache-apisix/reviews) for APISIX in G2.
- Mailing List: Mail to dev-subscribe@apisix.apache.org, then follow the reply to subscribe to the mailing list.
- Slack Workspace - [invitation link](https://apisix.apache.org/slack) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix").
- ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - follow and interact with us using hashtag `#ApacheAPISIX`
- [Documentation](https://apisix.apache.org/docs/)
- [Discussions](https://github.com/apache/apisix/discussions)
- [Blog](https://apisix.apache.org/blog)
## Features
You can use the APISIX API Gateway as a traffic entrance to process all business data, with features including dynamic routing, dynamic upstream, dynamic certificates,
A/B testing, canary release, blue-green deployment, rate limiting, defense against malicious attacks, metrics, monitoring and alarms, service observability, service governance, and more.
- **All platforms**
- Cloud-Native: Platform agnostic, No vendor lock-in, APISIX API Gateway can run from bare-metal to Kubernetes.
- Supports ARM64: no need to worry about infrastructure lock-in.
- **Multi protocols**
- [TCP/UDP Proxy](docs/en/latest/stream-proxy.md): Dynamic TCP/UDP proxy.
- [Dubbo Proxy](docs/en/latest/plugins/dubbo-proxy.md): Dynamic HTTP to Dubbo proxy.
- [Dynamic MQTT Proxy](docs/en/latest/plugins/mqtt-proxy.md): Supports load balancing MQTT connections by `client_id`; supports both MQTT [3.1.\*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) and [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html).
- [gRPC proxy](docs/en/latest/grpc-proxy.md): Proxying gRPC traffic.
- [gRPC Web Proxy](docs/en/latest/plugins/grpc-web.md): Proxying gRPC Web traffic to gRPC Service.
- [gRPC transcoding](docs/en/latest/plugins/grpc-transcode.md): Supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON.
- Proxy Websocket
- Proxy Protocol
- HTTP(S) Forward Proxy
- [SSL](docs/en/latest/certificate.md): Dynamically load an SSL certificate
- [HTTP/3 with QUIC](docs/en/latest/http3.md)
- **Full Dynamic**
- [Hot Updates And Hot Plugins](docs/en/latest/terminology/plugin.md): Continuously updates its configurations and plugins without restarts!
- [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Supports rewriting the `host`, `uri`, `schema`, `method`, and `headers` of the request before it is sent upstream.
- [Response Rewrite](docs/en/latest/plugins/response-rewrite.md): Set a customized response status code, body, and headers for the client.
- Dynamic Load Balancing: Round-robin load balancing with weight.
- Hash-based Load Balancing: Load balance with consistent hashing sessions.
- [Health Checks](docs/en/latest/tutorials/health-check.md): Enables health checks on upstream nodes and automatically filters out unhealthy nodes during load balancing to ensure system stability.
- Circuit-Breaker: Intelligent tracking of unhealthy upstream services.
- [Proxy Mirror](docs/en/latest/plugins/proxy-mirror.md): Provides the ability to mirror client requests.
- [Traffic Split](docs/en/latest/plugins/traffic-split.md): Allows users to incrementally direct percentages of traffic between various upstreams.
- **Fine-grained routing**
- [Supports full path matching and prefix matching](docs/en/latest/router-radixtree.md#how-to-use-libradixtree-in-apisix)
- [Support all Nginx built-in variables as conditions for routing](docs/en/latest/router-radixtree.md#how-to-filter-route-by-nginx-builtin-variable), so you can use `cookie`, `args`, etc. as routing conditions to implement canary release, A/B testing, etc.
- Support [various operators as judgment conditions for routing](https://github.com/iresty/lua-resty-radixtree#operator-list), for example `{"arg_age", ">", 24}`
- Support [custom route matching function](https://github.com/iresty/lua-resty-radixtree/blob/master/t/filter-fun.t#L10)
- IPv6: Use IPv6 to match the route.
- Support [TTL](docs/en/latest/admin-api.md#route)
- [Support priority](docs/en/latest/router-radixtree.md#3-match-priority)
- [Support Batch Http Requests](docs/en/latest/plugins/batch-requests.md)
- [Support filtering route by GraphQL attributes](docs/en/latest/router-radixtree.md#how-to-filter-route-by-graphql-attributes)
- **Security**
- Rich authentication & authorization support:
* [key-auth](docs/en/latest/plugins/key-auth.md)
* [JWT](docs/en/latest/plugins/jwt-auth.md)
* [basic-auth](docs/en/latest/plugins/basic-auth.md)
* [wolf-rbac](docs/en/latest/plugins/wolf-rbac.md)
* [casbin](docs/en/latest/plugins/authz-casbin.md)
* [keycloak](docs/en/latest/plugins/authz-keycloak.md)
* [casdoor](docs/en/latest/plugins/authz-casdoor.md)
- [IP Whitelist/Blacklist](docs/en/latest/plugins/ip-restriction.md)
- [Referer Whitelist/Blacklist](docs/en/latest/plugins/referer-restriction.md)
- [IdP](docs/en/latest/plugins/openid-connect.md): Supports external identity platforms, such as Auth0, Okta, etc.
- [Limit-req](docs/en/latest/plugins/limit-req.md)
- [Limit-count](docs/en/latest/plugins/limit-count.md)
- [Limit-concurrency](docs/en/latest/plugins/limit-conn.md)
- Anti-ReDoS (Regular expression Denial of Service): Built-in policies against ReDoS, with no configuration required.
- [CORS](docs/en/latest/plugins/cors.md) Enable CORS(Cross-origin resource sharing) for your API.
- [URI Blocker](docs/en/latest/plugins/uri-blocker.md): Block client request by URI.
- [Request Validator](docs/en/latest/plugins/request-validation.md)
- [CSRF](docs/en/latest/plugins/csrf.md): Protects your API from CSRF attacks based on the [`Double Submit Cookie`](https://en.wikipedia.org/wiki/Cross-site_request_forgery#Double_Submit_Cookie) approach.
- **OPS friendly**
- Zipkin tracing: [Zipkin](docs/en/latest/plugins/zipkin.md)
- Open source APM: support [Apache SkyWalking](docs/en/latest/plugins/skywalking.md)
- Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul.md), [Consul_kv](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md).
- Monitoring And Metrics: [Prometheus](docs/en/latest/plugins/prometheus.md)
- Clustering: APISIX nodes are stateless; the configuration center forms a cluster, see the [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/).
- High availability: Supports configuring multiple etcd addresses in the same cluster.
- [Dashboard](https://github.com/apache/apisix-dashboard)
- Version Control: Supports rollbacks of operations.
- CLI: start/stop/reload APISIX through the command line.
- [Standalone](docs/en/latest/deployment-modes.md#standalone): Supports loading route rules from a local YAML file, which is friendlier in environments such as Kubernetes (k8s).
- [Global Rule](docs/en/latest/terminology/global-rule.md): Allows running any plugin for all requests, e.g. rate limiting, IP filtering, etc.
- High performance: The single-core QPS reaches 18k with an average latency of less than 0.2 milliseconds.
- [Fault Injection](docs/en/latest/plugins/fault-injection.md)
- [REST Admin API](docs/en/latest/admin-api.md): Use the REST Admin API to control Apache APISIX. It only allows 127.0.0.1 access by default; you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also note that the Admin API uses key auth to verify the identity of the caller.
- External Loggers: Export access logs to external log management tools. ([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md), [TencentCloud CLS](docs/en/latest/plugins/tencent-cloud-cls.md)).
- [ClickHouse](docs/en/latest/plugins/clickhouse-logger.md): push logs to ClickHouse.
- [Elasticsearch](docs/en/latest/plugins/elasticsearch-logger.md): push logs to Elasticsearch.
- [Datadog](docs/en/latest/plugins/datadog.md): push custom metrics to the DogStatsD server, which comes bundled with the [Datadog agent](https://docs.datadoghq.com/agent/), over UDP. DogStatsD is an implementation of the StatsD protocol; it collects the custom metrics for the Apache APISIX agent, aggregates them into a single data point, and sends them to the configured Datadog server.
- [Helm charts](https://github.com/apache/apisix-helm-chart)
- [HashiCorp Vault](https://www.vaultproject.io/): Supports secret management for accessing secrets from Vault's secure storage backend in a low-trust environment. Currently, RS256 keys (public-private key pairs) or secret keys can be linked from Vault in the jwt-auth authentication plugin using the [APISIX Secret](docs/en/latest/terminology/secret.md) resource.
- **Highly scalable**
- [Custom plugins](docs/en/latest/plugin-develop.md): Allows hooking into common phases such as `rewrite`, `access`, `header filter`, `body filter`, and `log`; also allows hooking into the `balancer` stage.
- [Plugin can be written in Java/Go/Python](docs/en/latest/external-plugin.md)
- [Plugin can be written with Proxy Wasm SDK](docs/en/latest/wasm.md)
- Custom load balancing algorithms: You can use custom load balancing algorithms during the `balancer` phase.
- Custom routing: Users can implement their own routing algorithms.
- **Multi-Language support**
- Apache APISIX is a multi-language gateway for plugin development and provides support via `RPC` and `Wasm`.
![Multi Language Support into Apache APISIX](docs/assets/images/external-plugin.png)
- The RPC way is the current way: developers can choose a language according to their needs, start an independent process, and exchange data with APISIX through local RPC communication. So far, APISIX has support for [Java](https://github.com/apache/apisix-java-plugin-runner), [Golang](https://github.com/apache/apisix-go-plugin-runner), [Python](https://github.com/apache/apisix-python-plugin-runner) and Node.js.
- Wasm, or WebAssembly, is an experimental way: APISIX can load and run Wasm bytecode via the APISIX [wasm plugin](https://github.com/apache/apisix/blob/master/docs/en/latest/wasm.md), written with the [Proxy Wasm SDK](https://github.com/proxy-wasm/spec#sdks). Developers only need to write code against the SDK and compile it into Wasm bytecode that runs in the Wasm VM inside APISIX.
- **Serverless**
- [Lua functions](docs/en/latest/plugins/serverless.md): Invoke functions in each phase in APISIX.
- [AWS Lambda](docs/en/latest/plugins/aws-lambda.md): Integration with AWS Lambda function as a dynamic upstream to proxy all requests for a particular URI to the AWS API gateway endpoint. Supports authorization via api key and AWS IAM access secret.
- [Azure Functions](docs/en/latest/plugins/azure-functions.md): Seamless integration with Azure Serverless Function as a dynamic upstream to proxy all requests for a particular URI to the Microsoft Azure cloud.
- [Apache OpenWhisk](docs/en/latest/plugins/openwhisk.md): Seamless integration with Apache OpenWhisk as a dynamic upstream to proxy all requests for a particular URI to your own OpenWhisk cluster.
## Get Started
1. Installation
Please refer to [install documentation](https://apisix.apache.org/docs/apisix/installation-guide/).
2. Getting started
The getting started guide is a great way to learn the basics of APISIX. Just follow the steps in [Getting Started](https://apisix.apache.org/docs/apisix/getting-started/).
Further, you can follow the documentation to try more [plugins](docs/en/latest/plugins).
3. Admin API
Apache APISIX provides a [REST Admin API](docs/en/latest/admin-api.md) to dynamically control the Apache APISIX cluster (see the curl sketch at the end of this section).
4. Plugin development
You can refer to [plugin development guide](docs/en/latest/plugin-develop.md), and sample plugin `example-plugin`'s code implementation.
Reading [plugin concept](docs/en/latest/terminology/plugin.md) would help you learn more about the plugin.
For more documents, please refer to [Apache APISIX Documentation site](https://apisix.apache.org/docs/apisix/getting-started/)
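As a quick taste, here is a minimal Admin API sketch (the port 9180 and `$ADMIN_KEY` are assumptions; both depend on your `conf/config.yaml`) that creates a route proxying `/hello` to a local upstream:

```bash
# Create route #1: forward requests for /hello to a local upstream node.
curl -s http://127.0.0.1:9180/apisix/admin/routes/1 \
  -H "X-API-KEY: $ADMIN_KEY" -X PUT -d '
{
  "uri": "/hello",
  "upstream": {
    "type": "roundrobin",
    "nodes": { "127.0.0.1:8080": 1 }
  }
}'
```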
## Benchmark
Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of only 0.2 ms.
The [benchmark script](benchmark/run.sh) is open source; you are welcome to try it and contribute.
[APISIX also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3)
## User Stories
- [European eFactory Platform: API Security Gateway Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform)
- [Copernicus Reference System Software](https://github.com/COPRS/infrastructure/wiki/Networking-trade-off)
- [More Stories](https://apisix.apache.org/blog/tags/case-studies/)
## Who Uses APISIX API Gateway?
A wide variety of companies and organizations use the APISIX API Gateway for research, production, and commercial products; below are some of them:
- Airwallex
- Bilibili
- CVTE
- European eFactory Platform
- European Copernicus Reference System
- Geely
- HONOR
- Horizon Robotics
- iQIYI
- Lenovo
- NASA JPL
- Nayuki
- OPPO
- QingCloud
- Swisscom
- Tencent Game
- Travelsky
- vivo
- Sina Weibo
- WeCity
- WPS
- XPENG
- Zoom
## Logos
- [Apache APISIX logo(PNG)](https://github.com/apache/apisix/tree/master/logos/apache-apisix.png)
- [Apache APISIX logo source](https://apache.org/logos/#apisix)
## Acknowledgments
Inspired by Kong and Orange.
## License
[Apache 2.0 License](https://github.com/apache/apisix/tree/master/LICENSE)

View File

@@ -0,0 +1,60 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
## Threat Model
Here is the threat model of Apache APISIX, which is relevant to our developers and operators.
### Where the system might be attacked
As a proxy, Apache APISIX needs to be able to run in front of untrusted downstream traffic.
However, some features need to assume the downstream traffic is trusted. Such features should
either not be exposed to the internet by default (for example, by listening on 127.0.0.1), or
carry an explicit disclaimer in the docs.
As Apache APISIX is evolving rapidly, some newly added features may not be strong enough to defend against potential attacks.
Therefore, we need to divide the features into two groups: premature and mature ones.
Features that were merged within the last half year, or that are declared experimental, are premature.
Premature features are not yet battle-tested and are normally not covered by the security policy.
Additionally, we require that the components below be trusted:
1. the upstream
2. the configuration
3. the way we relay the configuration
4. the third-party components involved in Apache APISIX, for example, the authorization server
### How can we reduce the likelihood or impact of a potential threat
As the user:
First of all, don't expose the components that are required to be trusted to the internet, including the control plane (Dashboard or similar) and the configuration relay mechanism (etcd, an etcd adapter, or similar).
Then, harden the trusted components. For example,
1. if possible, enable authentication or use HTTPS for etcd (see the sketch after this list)
2. read the docs and disable plugins that are not needed, to reduce the attack surface
3. restrict and audit the change of configuration
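For example, a minimal sketch of point 1 (etcdctl v3 commands; usernames and config fields are assumptions that depend on your deployment):

```bash
# Create a root user and enable etcd authentication.
etcdctl user add root              # prompts for a password
etcdctl user grant-role root root
etcdctl auth enable
# Then give APISIX the credentials (etcd user/password in conf/config.yaml)
# and prefer https:// etcd endpoints with verified certificates.
```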
As the developer:
We should keep security in mind, and validate the input from the client before use.
As the maintainer:
We should keep security in mind, and review the code line by line.
We are open to discussion from the security researchers.

View File

@@ -0,0 +1,40 @@
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
-->
### Vision
Apache APISIX is an open source API gateway designed to help developers connect any APIs securely and efficiently in any environment.
Managing thousands or tens of thousands of APIs and microservices in a multi-cloud and hybrid-cloud environment is not an easy task.
There are many challenges, such as authentication, observability, and security.
Apache APISIX, a community-driven project, hopes to help everyone better manage and use APIs through the power of developers.
Every developer's contribution will be used by thousands of companies and serve billions of users.
### Milestones
Apache APISIX has relatively complete features for north-south traffic,
and will be iterated around the following directions in the next 6 months (if you have any ideas, feel free to create an issue to discuss):
- More complete support for Gateway API on APISIX ingress controller
- Add support for service mesh
- User-friendly documentation
- More plugins for public cloud and SaaS services
- Java/Go plugins and Wasm production-ready
- Add dynamic debugging tools for Apache APISIX

View File

@@ -0,0 +1,108 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
package = "apisix"
version = "master-0"
supported_platforms = {"linux"}
source = {
url = "git://github.com/apache/apisix",
branch = "master",
}
description = {
summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.",
homepage = "https://github.com/apache/apisix",
license = "Apache License 2.0",
}
dependencies = {
"lua-resty-ctxdump = 0.1-0",
"lyaml = 6.2.8",
"api7-lua-resty-dns-client = 7.0.1",
"lua-resty-template = 2.0",
"lua-resty-etcd = 1.10.5",
"api7-lua-resty-http = 0.2.2-0",
"lua-resty-balancer = 0.04",
"lua-resty-ngxvar = 0.5.2",
"lua-resty-jit-uuid = 0.0.7",
"lua-resty-worker-events = 1.0.0",
"lua-resty-healthcheck-api7 = 3.2.0",
"api7-lua-resty-jwt = 0.2.5",
"lua-resty-hmac-ffi = 0.06-1",
"lua-resty-cookie = 0.2.0-1",
"lua-resty-session = 3.10",
"opentracing-openresty = 0.1",
"lua-resty-radixtree = 2.9.2",
"lua-protobuf = 0.5.2-1",
"lua-resty-openidc = 1.7.6-3",
"luafilesystem = 1.7.0-2",
"nginx-lua-prometheus-api7 = 0.20240201-1",
"jsonschema = 0.9.8",
"lua-resty-ipmatcher = 0.6.1",
"lua-resty-kafka = 0.23-0",
"lua-resty-logger-socket = 2.0.1-0",
"skywalking-nginx-lua = 1.0.1",
"base64 = 1.5-2",
"binaryheap = 0.4",
"api7-dkjson = 0.1.1",
"resty-redis-cluster = 1.05-1",
"lua-resty-expr = 1.3.2",
"graphql = 0.0.2",
"argparse = 0.7.1-1",
"luasocket = 3.1.0-1",
"luasec = 1.3.2-1",
"lua-resty-consul = 0.3-2",
"penlight = 1.13.1",
"ext-plugin-proto = 0.6.1",
"casbin = 1.41.9-1",
"inspect == 3.1.1",
"lua-resty-rocketmq = 0.3.0-0",
"opentelemetry-lua = 0.2-3",
"net-url = 0.9-1",
"xml2lua = 1.5-2",
"nanoid = 0.1-1",
"lua-resty-mediador = 0.1.2-1",
"lua-resty-ldap = 0.1.0-0",
"lua-resty-t1k = 1.1.5",
"brotli-ffi = 0.3-1",
"lua-ffi-zlib = 0.6-0",
"jsonpath = 1.0-1",
"api7-lua-resty-aws == 2.0.2-1",
"multipart = 0.5.9-1",
}
build = {
type = "make",
build_variables = {
CFLAGS="$(CFLAGS)",
LIBFLAG="$(LIBFLAG)",
LUA_LIBDIR="$(LUA_LIBDIR)",
LUA_BINDIR="$(LUA_BINDIR)",
LUA_INCDIR="$(LUA_INCDIR)",
LUA="$(LUA)",
OPENSSL_INCDIR="$(OPENSSL_INCDIR)",
OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)",
},
install_variables = {
ENV_INST_PREFIX="$(PREFIX)",
ENV_INST_BINDIR="$(BINDIR)",
ENV_INST_LIBDIR="$(LIBDIR)",
ENV_INST_LUADIR="$(LUADIR)",
ENV_INST_CONFDIR="$(CONFDIR)",
},
}
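-- Usage note (added for clarity; not part of the original rockspec): the
-- project's Makefile installs these dependencies with LuaRocks 3.x, roughly:
--   luarocks install apisix-master-0.rockspec --tree deps --only-deps
-- while `luarocks make apisix-master-0.rockspec` builds and installs APISIX
-- itself through the `make` build type declared above.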

View File

@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local consumers = require("apisix.consumer").consumers
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function delete_checker(id)
local consumers, consumers_ver = consumers()
if consumers_ver and consumers then
for _, consumer in ipairs(consumers) do
if type(consumer) == "table" and consumer.value
and consumer.value.group_id
and tostring(consumer.value.group_id) == id then
return 400, {error_msg = "can not delete this consumer group,"
.. " consumer [" .. consumer.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "consumer_groups",
kind = "consumer group",
schema = core.schema.consumer_group,
checker = check_conf,
unsupported_methods = {"post"},
delete_checker = delete_checker
})
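-- A hedged usage sketch (not part of the original file; the Admin API port
-- 9180 and $ADMIN_KEY are assumptions) showing the delete_checker guard:
--   curl -X PUT http://127.0.0.1:9180/apisix/admin/consumer_groups/g1 \
--     -H "X-API-KEY: $ADMIN_KEY" \
--     -d '{"plugins":{"limit-count":{"count":200,"time_window":60}}}'
--   curl -X PUT http://127.0.0.1:9180/apisix/admin/consumers \
--     -H "X-API-KEY: $ADMIN_KEY" -d '{"username":"jack","group_id":"g1"}'
--   curl -X DELETE http://127.0.0.1:9180/apisix/admin/consumer_groups/g1 \
--     -H "X-API-KEY: $ADMIN_KEY"
-- The DELETE returns 400 ("can not delete this consumer group, consumer
-- [jack] is still using it now") while a consumer references the group.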

View File

@@ -0,0 +1,65 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugins = require("apisix.admin.plugins")
local resource = require("apisix.admin.resource")
local function check_conf(username, conf, need_username, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if username and username ~= conf.username then
return nil, {error_msg = "wrong username" }
end
if conf.plugins then
ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER)
if not ok then
return nil, {error_msg = "invalid plugins configuration: " .. err}
end
end
if conf.group_id then
local key = "/consumer_groups/" .. conf.group_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch consumer group info by "
.. "consumer group id [" .. conf.group_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch consumer group info by "
.. "consumer group id [" .. conf.group_id .. "], "
.. "response code: " .. res.status}
end
end
return conf.username
end
return resource.new({
name = "consumers",
kind = "consumer",
schema = core.schema.consumer,
checker = check_conf,
unsupported_methods = {"post", "patch"}
})

View File

@@ -0,0 +1,74 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugins = require("apisix.admin.plugins")
local plugin = require("apisix.plugin")
local resource = require("apisix.admin.resource")
local pairs = pairs
local function check_conf(_id, conf, _need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if conf.plugins then
ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER)
if not ok then
return nil, {error_msg = "invalid plugins configuration: " .. err}
end
for name, _ in pairs(conf.plugins) do
local plugin_obj = plugin.get(name)
if not plugin_obj then
return nil, {error_msg = "unknown plugin " .. name}
end
if plugin_obj.type ~= "auth" then
return nil, {error_msg = "only supports auth type plugins in consumer credential"}
end
end
end
return true, nil
end
-- get_credential_etcd_key is used to splice the credential's etcd key (without prefix)
-- from credential_id and sub_path.
-- Parameter credential_id is from the uri or payload; sub_path is in the form of
-- {consumer_name}/credentials or {consumer_name}/credentials/{credential_id}.
-- Only when GETting the credentials list is credential_id nil; sub_path is then like
-- {consumer_name}/credentials, so the return value is /consumers/{consumer_name}/credentials.
-- For the other methods, credential_id is not nil and the return value is
-- /consumers/{consumer_name}/credentials/{credential_id}.
local function get_credential_etcd_key(credential_id, _conf, sub_path, _args)
if credential_id then
local uri_segs = core.utils.split_uri(sub_path)
local consumer_name = uri_segs[1]
return "/consumers/" .. consumer_name .. "/credentials/" .. credential_id
end
return "/consumers/" .. sub_path
end
return resource.new({
name = "credentials",
kind = "credential",
schema = core.schema.credential,
checker = check_conf,
get_resource_etcd_key = get_credential_etcd_key,
unsupported_methods = {"post", "patch"}
})
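-- A hedged usage sketch (not part of the original file; port and key are
-- assumptions): a credential PUT is spliced under the consumer's prefix:
--   curl -X PUT http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/cred-1 \
--     -H "X-API-KEY: $ADMIN_KEY" \
--     -d '{"plugins":{"key-auth":{"key":"jack-key"}}}'
-- which stores the object at /consumers/jack/credentials/cred-1 (plus the
-- configured etcd prefix), per get_credential_etcd_key above.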

View File

@@ -0,0 +1,43 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
return resource.new({
name = "global_rules",
kind = "global rule",
schema = core.schema.global_rule,
checker = check_conf,
unsupported_methods = {"post"}
})

View File

@@ -0,0 +1,526 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local get_uri_args = ngx.req.get_uri_args
local route = require("apisix.utils.router")
local plugin = require("apisix.plugin")
local standalone = require("apisix.admin.standalone")
local v3_adapter = require("apisix.admin.v3_adapter")
local utils = require("apisix.admin.utils")
local ngx = ngx
local get_method = ngx.req.get_method
local ngx_time = ngx.time
local ngx_timer_at = ngx.timer.at
local ngx_worker_id = ngx.worker.id
local tonumber = tonumber
local tostring = tostring
local str_lower = string.lower
local reload_event = "/apisix/admin/plugins/reload"
local ipairs = ipairs
local error = error
local type = type
local events
local MAX_REQ_BODY = 1024 * 1024 * 1.5 -- 1.5 MiB
local viewer_methods = {
get = true,
}
local resources = {
routes = require("apisix.admin.routes"),
services = require("apisix.admin.services"),
upstreams = require("apisix.admin.upstreams"),
consumers = require("apisix.admin.consumers"),
credentials = require("apisix.admin.credentials"),
schema = require("apisix.admin.schema"),
ssls = require("apisix.admin.ssl"),
plugins = require("apisix.admin.plugins"),
protos = require("apisix.admin.proto"),
global_rules = require("apisix.admin.global_rules"),
stream_routes = require("apisix.admin.stream_routes"),
plugin_metadata = require("apisix.admin.plugin_metadata"),
plugin_configs = require("apisix.admin.plugin_config"),
consumer_groups = require("apisix.admin.consumer_group"),
secrets = require("apisix.admin.secrets"),
}
local _M = {version = 0.4}
local router
local function check_token(ctx)
local local_conf = core.config.local_conf()
-- check if admin_key is required
if local_conf.deployment.admin.admin_key_required == false then
return true
end
local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key")
if not admin_key then
return true
end
local req_token = ctx.var.arg_api_key or ctx.var.http_x_api_key
or ctx.var.cookie_x_api_key
if not req_token then
return false, "missing apikey"
end
local admin
for i, row in ipairs(admin_key) do
if req_token == row.key then
admin = row
break
end
end
if not admin then
return false, "wrong apikey"
end
if admin.role == "viewer" and
not viewer_methods[str_lower(get_method())] then
return false, "invalid method for role viewer"
end
return true
end
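-- Illustration (hypothetical config): with
--   deployment.admin.admin_key = {{name = "admin", key = "<secret>", role = "admin"}}
-- a request may carry the token in any of three places:
--   GET /apisix/admin/routes?api_key=<secret>
--   X-API-KEY: <secret>              (request header)
--   Cookie: x-api-key=<secret>
-- A key whose role is "viewer" is accepted for GET requests only.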
-- Set the `api_ctx` variable and check the admin API token. If the check fails,
-- the current request is interrupted and an error response is returned.
--
-- NOTE: This is a higher-level wrapper around the `check_token` function.
local function set_ctx_and_check_token()
local api_ctx = {}
core.ctx.set_vars_meta(api_ctx)
ngx.ctx.api_ctx = api_ctx
local ok, err = check_token(api_ctx)
if not ok then
core.log.warn("failed to check token: ", err)
core.response.exit(401, { error_msg = "failed to check token", description = err })
end
end
local function strip_etcd_resp(data)
if type(data) == "table"
and data.header ~= nil
and data.header.revision ~= nil
and data.header.raft_term ~= nil
then
-- strip etcd data
data.header = nil
data.responses = nil
data.succeeded = nil
if data.node then
data.node.createdIndex = nil
data.node.modifiedIndex = nil
end
data.count = nil
data.more = nil
data.prev_kvs = nil
if data.deleted then
-- We used to treat the type incorrectly; for compatibility we keep
-- returning it as a string.
data.deleted = tostring(data.deleted)
end
end
return data
end
local function head()
core.response.exit(200)
end
local function run()
set_ctx_and_check_token()
local uri_segs = core.utils.split_uri(ngx.var.uri)
core.log.info("uri: ", core.json.delay_encode(uri_segs))
-- /apisix/admin/schema/route
local seg_res, seg_id = uri_segs[4], uri_segs[5]
local seg_sub_path = core.table.concat(uri_segs, "/", 6)
if seg_res == "schema" and seg_id == "plugins" then
-- /apisix/admin/schema/plugins/limit-count
seg_res, seg_id = uri_segs[5], uri_segs[6]
seg_sub_path = core.table.concat(uri_segs, "/", 7)
end
if seg_res == "stream_routes" then
local local_conf = core.config.local_conf()
if local_conf.apisix.proxy_mode ~= "stream" and
local_conf.apisix.proxy_mode ~= "http&stream" then
core.log.warn("stream mode is disabled, can not add any stream ",
"routes")
core.response.exit(400, {error_msg = "stream mode is disabled, " ..
"can not add stream routes"})
end
end
if seg_res == "consumers" and #uri_segs >= 6 and uri_segs[6] == "credentials" then
seg_sub_path = seg_id .. "/" .. seg_sub_path
seg_res = uri_segs[6]
seg_id = uri_segs[7]
end
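-- Example (hypothetical URI): /apisix/admin/consumers/jack/credentials/cred1
-- first parses as seg_res = "consumers", seg_id = "jack",
-- seg_sub_path = "credentials/cred1"; the remap above turns it into
-- seg_res = "credentials", seg_id = "cred1",
-- seg_sub_path = "jack/credentials/cred1", which matches the key layout
-- used by apisix.admin.credentials.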
local resource = resources[seg_res]
if not resource then
core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
end
local method = str_lower(get_method())
if not resource[method] then
core.response.exit(404, {error_msg = "not found"})
end
local req_body, err = core.request.get_body(MAX_REQ_BODY)
if err then
core.log.error("failed to read request body: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if req_body then
local data, err = core.json.decode(req_body)
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err,
req_body = req_body})
end
req_body = data
end
local uri_args = ngx.req.get_uri_args() or {}
if uri_args.ttl then
if not tonumber(uri_args.ttl) then
core.response.exit(400, {error_msg = "invalid argument ttl: "
.. "should be a number"})
end
end
local code, data
if seg_res == "schema" or seg_res == "plugins" then
code, data = resource[method](seg_id, req_body, seg_sub_path, uri_args)
else
code, data = resource[method](resource, seg_id, req_body, seg_sub_path, uri_args)
end
if code then
if method == "get" and plugin.enable_data_encryption then
if seg_res == "consumers" or seg_res == "credentials" then
utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_CONSUMER)
elseif seg_res == "plugin_metadata" then
utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_METADATA)
else
utils.decrypt_params(plugin.decrypt_conf, data)
end
end
if v3_adapter.enable_v3() then
core.response.set_header("X-API-VERSION", "v3")
else
core.response.set_header("X-API-VERSION", "v2")
end
data = v3_adapter.filter(data, resource)
data = strip_etcd_resp(data)
core.response.exit(code, data)
end
end
local function get_plugins_list()
set_ctx_and_check_token()
local args = get_uri_args()
local subsystem = args["subsystem"]
-- If subsystem is passed, it must be either "http" or "stream";
-- when it is absent/nil, it defaults to "http".
subsystem = subsystem or "http"
if subsystem == "http" or subsystem == "stream" then
local plugins = resources.plugins.get_plugins_list(subsystem)
core.response.exit(200, plugins)
end
core.response.exit(400,"invalid subsystem passed")
end
-- Handle unsupported request methods for the virtual "reload" plugin
local function unsupported_methods_reload_plugin()
set_ctx_and_check_token()
core.response.exit(405, {
error_msg = "please use PUT method to reload the plugins, "
.. get_method() .. " method is not allowed."
})
end
local function post_reload_plugins()
set_ctx_and_check_token()
local success, err = events:post(reload_event, get_method(), ngx_time())
if not success then
core.response.exit(503, err)
end
core.response.exit(200, "done")
end
local function plugins_eq(old, new)
local old_set = {}
for _, p in ipairs(old) do
old_set[p.name] = p
end
local new_set = {}
for _, p in ipairs(new) do
new_set[p.name] = p
end
return core.table.set_eq(old_set, new_set)
end
local function sync_local_conf_to_etcd(reset)
local local_conf = core.config.local_conf()
local plugins = {}
for _, name in ipairs(local_conf.plugins) do
core.table.insert(plugins, {
name = name,
})
end
for _, name in ipairs(local_conf.stream_plugins) do
core.table.insert(plugins, {
name = name,
stream = true,
})
end
if reset then
local res, err = core.etcd.get("/plugins")
if not res then
core.log.error("failed to get current plugins: ", err)
return
end
if res.status == 404 then
-- nothing needs to be reset
return
end
if res.status ~= 200 then
core.log.error("failed to get current plugins, status: ", res.status)
return
end
local stored_plugins = res.body.node.value
local revision = res.body.node.modifiedIndex
if plugins_eq(stored_plugins, plugins) then
core.log.info("plugins not changed, don't need to reset")
return
end
core.log.warn("sync local conf to etcd")
local res, err = core.etcd.atomic_set("/plugins", plugins, nil, revision)
if not res then
core.log.error("failed to set plugins: ", err)
end
return
end
core.log.warn("sync local conf to etcd")
-- store all plugin names in one key so that they can be updated atomically
local res, err = core.etcd.set("/plugins", plugins)
if not res then
core.log.error("failed to set plugins: ", err)
end
end
local function reload_plugins(data, event, source, pid)
core.log.info("start to hot reload plugins")
plugin.load()
if ngx_worker_id() == 0 then
sync_local_conf_to_etcd()
end
end
local function schema_validate()
local uri_segs = core.utils.split_uri(ngx.var.uri)
core.log.info("uri: ", core.json.delay_encode(uri_segs))
local seg_res = uri_segs[6]
local resource = resources[seg_res]
if not resource then
core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
end
local req_body, err = core.request.get_body(MAX_REQ_BODY)
if err then
core.log.error("failed to read request body: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if req_body then
local data, err = core.json.decode(req_body)
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err,
req_body = req_body})
end
req_body = data
end
local ok, err = core.schema.check(resource.schema, req_body)
if ok then
core.response.exit(200)
end
core.response.exit(400, {error_msg = err})
end
local function standalone_run()
set_ctx_and_check_token()
return standalone.run()
end
local http_head_route = {
paths = [[/apisix/admin]],
methods = {"HEAD"},
handler = head,
}
local uri_route = {
http_head_route,
{
paths = [[/apisix/admin/*]],
methods = {"GET", "PUT", "POST", "DELETE", "PATCH"},
handler = run,
},
{
paths = [[/apisix/admin/plugins/list]],
methods = {"GET"},
handler = get_plugins_list,
},
{
paths = [[/apisix/admin/schema/validate/*]],
methods = {"POST"},
handler = schema_validate,
},
{
paths = reload_event,
methods = {"PUT"},
handler = post_reload_plugins,
},
-- Handle methods other than "PUT" on "/plugin/reload" to inform user
{
paths = reload_event,
methods = { "GET", "POST", "DELETE", "PATCH" },
handler = unsupported_methods_reload_plugin,
},
}
local standalone_uri_route = {
http_head_route,
{
paths = [[/apisix/admin/configs]],
methods = {"GET", "PUT"},
handler = standalone_run,
},
}
function _M.init_worker()
local local_conf = core.config.local_conf()
if not local_conf.apisix or not local_conf.apisix.enable_admin then
return
end
local is_yaml_config_provider = local_conf.deployment.config_provider == "yaml"
if is_yaml_config_provider then
router = route.new(standalone_uri_route)
standalone.init_worker()
else
router = route.new(uri_route)
end
-- register reload plugin handler
events = require("apisix.events")
events:register(reload_plugins, reload_event, "PUT")
if ngx_worker_id() == 0 then
-- check if admin_key is required
if local_conf.deployment.admin.admin_key_required == false then
core.log.warn("Admin key is bypassed! ",
"If you are deploying APISIX in a production environment, ",
"please enable `admin_key_required` and set a secure admin key!")
end
if is_yaml_config_provider then -- standalone mode does not need sync to etcd
return
end
local ok, err = ngx_timer_at(0, function(premature)
if premature then
return
end
-- try to reset /plugins in etcd to the current local configuration
sync_local_conf_to_etcd(true)
end)
if not ok then
error("failed to sync local configure to etcd: " .. err)
end
end
end
function _M.get()
return router
end
return _M
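The plugin hot-reload path is event-driven: one worker receives the PUT and broadcasts, and every worker reloads. A minimal sketch of that fan-out, reusing the same `apisix.events` calls the module itself makes (the handler body is illustrative):

local events = require("apisix.events")
local reload_event = "/apisix/admin/plugins/reload"

-- each worker registers its handler once (done in init_worker above)
events:register(function(data, event, source, pid)
    require("apisix.plugin").load()  -- rebuild this worker's plugin table
end, reload_event, "PUT")

-- the worker serving `PUT /apisix/admin/plugins/reload` then broadcasts
events:post(reload_event, "PUT", ngx.time())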

View File

@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local get_routes = require("apisix.router").http_routes
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function delete_checker(id)
local routes, routes_ver = get_routes()
if routes_ver and routes then
for _, route in ipairs(routes) do
if type(route) == "table" and route.value
and route.value.plugin_config_id
and tostring(route.value.plugin_config_id) == id then
return 400, {error_msg = "can not delete this plugin config,"
.. " route [" .. route.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "plugin_configs",
kind = "plugin config",
schema = core.schema.plugin_config,
checker = check_conf,
unsupported_methods = {"post"},
delete_checker = delete_checker
})
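The delete guard is easiest to see with concrete data; the objects below are hypothetical:

-- /apisix/plugin_configs/1 -> { id = "1", plugins = { ["limit-count"] = {
--                               count = 2, time_window = 60 } } }
-- /apisix/routes/42        -> { id = "42", uri = "/demo", plugin_config_id = "1" }
--
-- DELETE /apisix/admin/plugin_configs/1 is then rejected with 400:
--   "can not delete this plugin config, route [42] is still using it now"
-- DELETE ...?force=true skips the check (see the delete method in resource.lua).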

View File

@@ -0,0 +1,83 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local pcall = pcall
local require = require
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local encrypt_conf = require("apisix.plugin").encrypt_conf
local injected_mark = "injected metadata_schema"
local function validate_plugin(name)
local pkg_name = "apisix.plugins." .. name
local ok, plugin_object = pcall(require, pkg_name)
if ok then
return true, plugin_object
end
pkg_name = "apisix.stream.plugins." .. name
return pcall(require, pkg_name)
end
local function check_conf(plugin_name, conf)
if not plugin_name then
return nil, {error_msg = "missing plugin name"}
end
local ok, plugin_object = validate_plugin(plugin_name)
if not ok then
return nil, {error_msg = "invalid plugin name"}
end
if not plugin_object.metadata_schema then
plugin_object.metadata_schema = {
type = "object",
['$comment'] = injected_mark,
properties = {},
}
end
local schema = plugin_object.metadata_schema
local ok, err
if schema['$comment'] == injected_mark
-- check_schema is not required. If missing, fallback to check schema directly
or not plugin_object.check_schema
then
ok, err = core.schema.check(schema, conf)
else
ok, err = plugin_object.check_schema(conf, core.schema.TYPE_METADATA)
end
encrypt_conf(plugin_name, conf, core.schema.TYPE_METADATA)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
return plugin_name
end
return resource.new({
name = "plugin_metadata",
kind = "plugin_metadata",
schema = core.schema.plugin_metadata,
checker = check_conf,
unsupported_methods = {"post", "patch"}
})
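When a plugin declares no metadata_schema, the injected empty object schema makes any table valid. A hedged walk-through (the plugin name is hypothetical):

-- check_conf("example-plugin", { log_format = { host = "$host" } })
--   1. requires apisix.plugins.example-plugin (or the stream variant)
--   2. finds no metadata_schema, so it injects { type = "object",
--      ['$comment'] = "injected metadata_schema", properties = {} }
--   3. validates the conf against that schema (any table passes)
--   4. returns "example-plugin"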

View File

@@ -0,0 +1,139 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local check_schema = require("apisix.plugin").check_schema
local ipairs = ipairs
local table_sort = table.sort
local table_insert = table.insert
local get_uri_args = ngx.req.get_uri_args
local plugin_get_all = require("apisix.plugin").get_all
local plugin_get_http = require("apisix.plugin").get
local plugin_get_stream = require("apisix.plugin").get_stream
local encrypt_conf = require("apisix.plugin").encrypt_conf
local pairs = pairs
local _M = {}
function _M.check_schema(plugins_conf, schema_type)
local ok, err = check_schema(plugins_conf, schema_type, false)
if ok then
for name, conf in pairs(plugins_conf) do
encrypt_conf(name, conf, schema_type)
end
end
return ok, err
end
function _M.get(name)
local arg = get_uri_args()
-- If subsystem is passed inside args, it must be one of: "http" or "stream".
local subsystem = arg["subsystem"] or "http"
if subsystem ~= "http" and subsystem ~= "stream" then
return 400, {error_msg = "unsupported subsystem: "..subsystem}
end
-- the "all" argument is deprecated and will be removed
if (arg and arg["all"] == "true") then
core.log.warn("query parameter \"all\" will be deprecated soon.")
local http_plugins, stream_plugins = plugin_get_all({
version = true,
priority = true,
schema = true,
metadata_schema = true,
consumer_schema = true,
type = true,
scope = true,
})
if arg["subsystem"] == "stream" then
return 200, stream_plugins
end
return 200, http_plugins
end
local plugin
if subsystem == "http" then
plugin = plugin_get_http(name)
else
plugin = plugin_get_stream(name)
end
if not plugin then
local err = "plugin not found in subsystem " .. subsystem
core.log.warn(err)
return 404, {error_msg = err}
end
local json_schema = plugin.schema
if arg and arg["schema_type"] == "consumer" then
json_schema = plugin.consumer_schema
end
if not json_schema then
return 400, {error_msg = "not found schema"}
end
return 200, json_schema
end
function _M.get_plugins_list(subsystem)
local http_plugins
local stream_plugins
if subsystem == "http" then
http_plugins = core.config.local_conf().plugins
else
stream_plugins = core.config.local_conf().stream_plugins
end
local priorities = {}
local success = {}
if http_plugins then
for i, name in ipairs(http_plugins) do
local plugin = plugin_get_http(name)
if plugin and plugin.priority then
priorities[name] = plugin.priority
table_insert(success, name)
end
end
end
if stream_plugins then
for i, name in ipairs(stream_plugins) do
local plugin = plugin_get_stream(name)
if plugin and plugin.priority then
priorities[name] = plugin.priority
table_insert(success, name)
end
end
end
local function cmp(x, y)
return priorities[x] > priorities[y]
end
table_sort(success, cmp)
return success
end
return _M
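get_plugins_list orders the configured plugins by priority, highest first. With illustrative priorities:

-- priorities: key-auth = 2500, limit-count = 1002, prometheus = 500
-- get_plugins_list("http") -> { "key-auth", "limit-count", "prometheus" }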

View File

@@ -0,0 +1,111 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local ipairs = ipairs
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local compile_proto = require("apisix.plugins.grpc-transcode.proto").compile_proto
local tostring = tostring
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = compile_proto(conf.content)
if not ok then
return nil, {error_msg = "invalid content: " .. err}
end
return true
end
local function check_proto_used(plugins, deleting, ptype, pid)
--core.log.info("check_proto_used plugins: ", core.json.delay_encode(plugins, true))
--core.log.info("check_proto_used deleting: ", deleting)
--core.log.info("check_proto_used ptype: ", ptype)
--core.log.info("check_proto_used pid: ", pid)
if plugins then
if type(plugins) == "table" and plugins["grpc-transcode"]
and plugins["grpc-transcode"].proto_id
and tostring(plugins["grpc-transcode"].proto_id) == deleting then
return false, {error_msg = "can not delete this proto, "
.. ptype .. " [" .. pid
.. "] is still using it now"}
end
end
return true
end
local function delete_checker(id)
core.log.info("proto delete: ", id)
local routes, routes_ver = get_routes()
core.log.info("routes: ", core.json.delay_encode(routes, true))
core.log.info("routes_ver: ", routes_ver)
if routes_ver and routes then
for _, route in ipairs(routes) do
core.log.info("proto delete route item: ", core.json.delay_encode(route, true))
if type(route) == "table" and route.value and route.value.plugins then
local ret, err = check_proto_used(route.value.plugins, id, "route",route.value.id)
if not ret then
return 400, err
end
end
end
end
core.log.info("proto delete route ref check pass: ", id)
local services, services_ver = get_services()
core.log.info("services: ", core.json.delay_encode(services, true))
core.log.info("services_ver: ", services_ver)
if services_ver and services then
for _, service in ipairs(services) do
if type(service) == "table" and service.value and service.value.plugins then
local ret, err = check_proto_used(service.value.plugins, id,
"service", service.value.id)
if not ret then
return 400, err
end
end
end
end
core.log.info("proto delete service ref check pass: ", id)
return nil, nil
end
return resource.new({
name = "protos",
kind = "proto",
schema = core.schema.proto,
checker = check_conf,
unsupported_methods = {"patch"},
delete_checker = delete_checker
})

View File

@@ -0,0 +1,468 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local utils = require("apisix.admin.utils")
local apisix_ssl = require("apisix.ssl")
local apisix_consumer = require("apisix.consumer")
local setmetatable = setmetatable
local tostring = tostring
local ipairs = ipairs
local type = type
local _M = {
list_filter_fields = {},
}
local mt = {
__index = _M
}
local no_id_res = {
consumers = true,
plugin_metadata = true
}
local function split_typ_and_id(id, sub_path)
local uri_segs = core.utils.split_uri(sub_path)
local typ = id
local id = nil
if #uri_segs > 0 then
id = uri_segs[1]
end
return typ, id
end
local function check_forbidden_properties(conf, forbidden_properties)
local not_allow_properties = "the property is forbidden: "
if conf then
for _, v in ipairs(forbidden_properties) do
if conf[v] then
return not_allow_properties .. " " .. v
end
end
if conf.upstream then
for _, v in ipairs(forbidden_properties) do
if conf.upstream[v] then
return not_allow_properties .. " upstream." .. v
end
end
end
if conf.plugins then
for _, v in ipairs(forbidden_properties) do
if conf.plugins[v] then
return not_allow_properties .. " plugins." .. v
end
end
end
end
return nil
end
function _M:check_conf(id, conf, need_id, typ, allow_time)
if self.name == "secrets" then
id = typ .. "/" .. id
end
-- check for a missing configuration
if not conf then
return nil, {error_msg = "missing configurations"}
end
-- check the id when one is required
if not no_id_res[self.name] then
id = id or conf.id
if need_id and not id then
return nil, {error_msg = "missing ".. self.kind .. " id"}
end
if not need_id and id then
return nil, {error_msg = "wrong ".. self.kind .. " id, do not need it"}
end
if need_id and conf.id and tostring(conf.id) ~= tostring(id) then
return nil, {error_msg = "wrong ".. self.kind .. " id"}
end
conf.id = id
end
-- check create time and update time
if not allow_time then
local forbidden_properties = {"create_time", "update_time"}
local err = check_forbidden_properties(conf, forbidden_properties)
if err then
return nil, {error_msg = err}
end
end
core.log.info("conf : ", core.json.delay_encode(conf))
-- check the resource own rules
if self.name ~= "secrets" then
core.log.info("schema: ", core.json.delay_encode(self.schema))
end
local ok, err = self.checker(id, conf, need_id, self.schema, typ)
if not ok then
return ok, err
else
if no_id_res[self.name] then
return ok
else
return need_id and id or true
end
end
end
function _M:get(id, conf, sub_path)
if core.table.array_find(self.unsupported_methods, "get") then
return 405, {error_msg = "not supported `GET` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
key = key .. "/"
typ, id = split_typ_and_id(id, sub_path)
end
if id then
if self.name == "secrets" then
key = key .. typ
end
key = key .. "/" .. id
end
-- some resources (consumers) have sub-resources (credentials);
-- the etcd key format of a sub-resource differs from the main resource's
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path)
end
local res, err = core.etcd.get(key, not id)
if not res then
core.log.error("failed to get ", self.kind, "[", key, "] from etcd: ", err)
return 503, {error_msg = err}
end
if self.name == "ssls" then
-- not return private key for security
if res.body and res.body.node and res.body.node.value then
res.body.node.value.key = nil
end
end
-- a consumers etcd range response also includes credentials, so filter them out
if self.name == "consumers" and res.body.list then
res.body.list = apisix_consumer.filter_consumers_list(res.body.list)
res.body.total = #res.body.list
end
utils.fix_count(res.body, id)
return res.status, res.body
end
function _M:post(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "post") then
return 405, {error_msg = "not supported `POST` method for " .. self.kind}
end
local id, err = self:check_conf(id, conf, false)
if not id then
return 400, err
end
if self.name == "ssls" then
-- encrypt private key
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
local key = "/" .. self.name
utils.inject_timestamp(conf)
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.push(key, conf, ttl)
if not res then
core.log.error("failed to post ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M:put(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "put") then
return 405, {error_msg = "not supported `PUT` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
typ, id = split_typ_and_id(id, sub_path)
key = key .. "/" .. typ
end
local need_id = not no_id_res[self.name]
local ok, err = self:check_conf(id, conf, need_id, typ)
if not ok then
return 400, err
end
if self.name ~= "secrets" then
id = ok
end
if self.name == "ssls" then
-- encrypt private key
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
key = key .. "/" .. id
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path, args)
end
if self.name == "credentials" then
local consumer_key = apisix_consumer.get_consumer_key_from_credential_key(key)
local res, err = core.etcd.get(consumer_key, false)
if not res then
return 503, {error_msg = err}
end
if res.status == 404 then
return res.status, {error_msg = "consumer not found"}
end
if res.status ~= 200 then
core.log.debug("failed to get consumer for the credential, credential key: ", key,
", consumer key: ", consumer_key, ", res.status: ", res.status)
return res.status, {error_msg = "failed to get the consumer"}
end
end
if self.name ~= "plugin_metadata" then
local ok, err = utils.inject_conf_with_prev_conf(self.kind, key, conf)
if not ok then
return 503, {error_msg = err}
end
else
conf.id = id
end
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.set(key, conf, ttl)
if not res then
core.log.error("failed to put ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
-- Keep the unused conf to make the args list consistent with other methods
function _M:delete(id, conf, sub_path, uri_args)
if core.table.array_find(self.unsupported_methods, "delete") then
return 405, {error_msg = "not supported `DELETE` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
typ, id = split_typ_and_id(id, sub_path)
end
if not id then
return 400, {error_msg = "missing " .. self.kind .. " id"}
end
-- core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
if self.name == "secrets" then
key = key .. "/" .. typ
end
key = key .. "/" .. id
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path, uri_args)
end
if self.delete_checker and uri_args.force ~= "true" then
local code, err = self.delete_checker(id)
if err then
return code, err
end
end
if self.name == "consumers" then
local res, err = core.etcd.rmdir(key .. "/credentials/")
if not res then
return 503, {error_msg = err}
end
end
local res, err = core.etcd.delete(key)
if not res then
core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M:patch(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "patch") then
return 405, {error_msg = "not supported `PATCH` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
local uri_segs = core.utils.split_uri(sub_path)
if #uri_segs < 1 then
return 400, {error_msg = "no secret id"}
end
typ = id
id = uri_segs[1]
sub_path = core.table.concat(uri_segs, "/", 2)
end
if not id then
return 400, {error_msg = "missing " .. self.kind .. " id"}
end
if self.name == "secrets" then
key = key .. "/" .. typ
end
key = key .. "/" .. id
if conf == nil then
return 400, {error_msg = "missing new configuration"}
end
if not sub_path or sub_path == "" then
if type(conf) ~= "table" then
return 400, {error_msg = "invalid configuration"}
end
end
local res_old, err = core.etcd.get(key)
if not res_old then
core.log.error("failed to get ", self.kind, " [", key, "] in etcd: ", err)
return 503, {error_msg = err}
end
if res_old.status ~= 200 then
return res_old.status, res_old.body
end
core.log.info("key: ", key, " old value: ", core.json.delay_encode(res_old, true))
local node_value = res_old.body.node.value
local modified_index = res_old.body.node.modifiedIndex
if sub_path and sub_path ~= "" then
if self.name == "ssls" then
if sub_path == "key" then
conf = apisix_ssl.aes_encrypt_pkey(conf)
elseif sub_path == "keys" then
for i = 1, #conf do
conf[i] = apisix_ssl.aes_encrypt_pkey(conf[i])
end
end
end
local code, err, node_val = core.table.patch(node_value, sub_path, conf)
node_value = node_val
if code then
return code, {error_msg = err}
end
utils.inject_timestamp(node_value, nil, true)
else
if self.name == "ssls" then
if conf.key then
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
end
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
node_value = core.table.merge(node_value, conf)
utils.inject_timestamp(node_value, nil, conf)
end
core.log.info("new conf: ", core.json.delay_encode(node_value, true))
local ok, err = self:check_conf(id, node_value, true, typ, true)
if not ok then
return 400, err
end
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.atomic_set(key, node_value, ttl, modified_index)
if not res then
core.log.error("failed to set new ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M.new(opt)
return setmetatable(opt, mt)
end
return _M
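Every admin endpoint in this commit is an instance of this factory. A minimal sketch of a new resource module following the same conventions (the resource name and schema entry are hypothetical):

local core = require("apisix.core")
local resource = require("apisix.admin.resource")

local function check_conf(id, conf, need_id, schema)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return nil, {error_msg = "invalid configuration: " .. err}
    end
    return true
end

return resource.new({
    name = "examples",             -- hypothetical etcd prefix: /examples
    kind = "example",              -- used in error messages
    schema = core.schema.example,  -- assumes such a schema is registered
    checker = check_conf,
    unsupported_methods = {"post"},
})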

View File

@@ -0,0 +1,184 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local expr = require("resty.expr.v1")
local core = require("apisix.core")
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local loadstring = loadstring
local ipairs = ipairs
local jp = require("jsonpath")
local function validate_post_arg(node)
if type(node) ~= "table" then
return true
end
-- Handle post_arg conditions
if #node >= 3 and type(node[1]) == "string" and node[1]:find("^post_arg%.") then
local key = node[1]
local json_path = "$." .. key:sub(11) -- Remove "post_arg." prefix
local _, err = jp.parse(json_path)
if err then
return false, err
end
return true
end
for _, child in ipairs(node) do
local ok, err = validate_post_arg(child)
if not ok then
return false, err
end
end
return true
end
local function check_conf(id, conf, need_id, schema)
if conf.host and conf.hosts then
return nil, {error_msg = "only one of host or hosts is allowed"}
end
if conf.remote_addr and conf.remote_addrs then
return nil, {error_msg = "only one of remote_addr or remote_addrs is "
.. "allowed"}
end
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local upstream_conf = conf.upstream
if upstream_conf then
local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
if not ok then
return nil, {error_msg = err}
end
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
local service_id = conf.service_id
if service_id then
local key = "/services/" .. service_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "], "
.. "response code: " .. res.status}
end
end
local plugin_config_id = conf.plugin_config_id
if plugin_config_id then
local key = "/plugin_configs/" .. plugin_config_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch plugin config info by "
.. "plugin config id [" .. plugin_config_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch plugin config info by "
.. "plugin config id [" .. plugin_config_id .. "], "
.. "response code: " .. res.status}
end
end
if conf.plugins then
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
end
if conf.vars then
ok, err = expr.new(conf.vars)
if not ok then
return nil, {error_msg = "failed to validate the 'vars' expression: " .. err}
end
end
ok, err = validate_post_arg(conf.vars)
if not ok then
return nil, {error_msg = "failed to validate the 'vars' expression: " ..
err}
end
if conf.filter_func then
local func, err = loadstring("return " .. conf.filter_func)
if not func then
return nil, {error_msg = "failed to load 'filter_func' string: "
.. err}
end
if type(func()) ~= "function" then
return nil, {error_msg = "'filter_func' should be a function"}
end
end
if conf.script then
local obj, err = loadstring(conf.script)
if not obj then
return nil, {error_msg = "failed to load 'script' string: "
.. err}
end
if type(obj()) ~= "table" then
return nil, {error_msg = "'script' should be a Lua object"}
end
end
return true
end
return resource.new({
name = "routes",
kind = "route",
schema = core.schema.route,
checker = check_conf,
list_filter_fields = {
service_id = true,
upstream_id = true,
},
})
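validate_post_arg walks the `vars` expression tree and only inspects leaf conditions whose first element starts with `post_arg.`; a hedged illustration:

-- {"arg_debug", "==", "1"}                -- no post_arg prefix: passes through
-- {"post_arg.user.name", "==", "alice"}   -- the part after "post_arg." is
--                                         -- parsed as a JSONPath expression
-- {{"post_arg.age", ">", "18"},           -- nested nodes are visited
--  {"arg_debug", "==", "1"}}              -- recursively
-- a malformed path such as "post_arg.user[" fails jp.parse, and the
-- route configuration is rejected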

View File

@@ -0,0 +1,35 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local _M = {
version = 0.1,
}
function _M.get(name)
local json_schema = core.schema[name]
core.log.info("schema: ", core.json.delay_encode(core.schema, true))
if not json_schema then
return 400, {error_msg = "not found schema: " .. name}
end
return 200, json_schema
end
return _M

View File

@@ -0,0 +1,45 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local pcall = pcall
local function check_conf(id, conf, need_id, schema, typ)
local ok, secret_manager = pcall(require, "apisix.secret." .. typ)
if not ok then
return false, {error_msg = "invalid secret manager: " .. typ}
end
local ok, err = core.schema.check(secret_manager.schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
return true
end
return resource.new({
name = "secrets",
kind = "secret",
checker = check_conf,
unsupported_methods = {"post"}
})
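Here `typ` is the secret manager name taken from the URI (for example /apisix/admin/secrets/vault/{id}); the checker simply requires the matching module:

-- typ = "vault" -> require("apisix.secret.vault") succeeds and conf is
--                  validated against that manager's schema
-- typ = "foo"   -> pcall(require) fails, so the request is rejected with
--                  {error_msg = "invalid secret manager: foo"}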

View File

@@ -0,0 +1,128 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local get_routes = require("apisix.router").http_routes
local get_stream_routes = require("apisix.router").stream_routes
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local tostring = tostring
local ipairs = ipairs
local type = type
local loadstring = loadstring
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if need_id and not id then
return nil, {error_msg = "wrong type of service id"}
end
local upstream_conf = conf.upstream
if upstream_conf then
local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
if not ok then
return nil, {error_msg = err}
end
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
if conf.plugins then
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
end
if conf.script then
local obj, err = loadstring(conf.script)
if not obj then
return nil, {error_msg = "failed to load 'script' string: "
.. err}
end
if type(obj()) ~= "table" then
return nil, {error_msg = "'script' should be a Lua object"}
end
end
return true
end
local function delete_checker(id)
local routes, routes_ver = get_routes()
core.log.info("routes: ", core.json.delay_encode(routes, true))
core.log.info("routes_ver: ", routes_ver)
if routes_ver and routes then
for _, route in ipairs(routes) do
if type(route) == "table" and route.value
and route.value.service_id
and tostring(route.value.service_id) == id then
return 400, {error_msg = "can not delete this service directly,"
.. " route [" .. route.value.id
.. "] is still using it now"}
end
end
end
local stream_routes, stream_routes_ver = get_stream_routes()
core.log.info("stream_routes: ", core.json.delay_encode(stream_routes, true))
core.log.info("stream_routes_ver: ", stream_routes_ver)
if stream_routes_ver and stream_routes then
for _, route in ipairs(stream_routes) do
if type(route) == "table" and route.value
and route.value.service_id
and tostring(route.value.service_id) == id then
return 400, {error_msg = "can not delete this service directly,"
.. " stream_route [" .. route.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "services",
kind = "service",
schema = core.schema.service,
checker = check_conf,
delete_checker = delete_checker
})

View File

@@ -0,0 +1,37 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local apisix_ssl = require("apisix.ssl")
local function check_conf(id, conf, need_id, schema)
local ok, err = apisix_ssl.check_ssl_conf(false, conf)
if not ok then
return nil, {error_msg = err}
end
return need_id and id or true
end
return resource.new({
name = "ssls",
kind = "ssl",
schema = core.schema.ssl,
checker = check_conf
})

View File

@@ -0,0 +1,339 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local pairs = pairs
local ipairs = ipairs
local str_lower = string.lower
local ngx = ngx
local get_method = ngx.req.get_method
local shared_dict = ngx.shared["standalone-config"]
local table_insert = table.insert
local table_new = require("table.new")
local yaml = require("lyaml")
local events = require("apisix.events")
local core = require("apisix.core")
local config_yaml = require("apisix.core.config_yaml")
local check_schema = require("apisix.core.schema").check
local tbl_deepcopy = require("apisix.core.table").deepcopy
local EVENT_UPDATE = "standalone-api-configuration-update"
local _M = {}
local function check_duplicate(item, key, id_set)
local identifier, identifier_type
if key == "consumers" then
identifier = item.id or item.username
identifier_type = item.id and "credential id" or "username"
else
identifier = item.id
identifier_type = "id"
end
if id_set[identifier] then
return true, "found duplicate " .. identifier_type .. " " .. identifier .. " in " .. key
end
id_set[identifier] = true
return false
end
local function get_config()
local config = shared_dict:get("config")
if not config then
return nil, "not found"
end
local err
config, err = core.json.decode(config)
if not config then
return nil, "failed to decode json: " .. err
end
return config
end
local function update_and_broadcast_config(apisix_yaml)
local raw, err = core.json.encode(apisix_yaml)
if not raw then
core.log.error("failed to encode json: ", err)
return nil, "failed to encode json: " .. err
end
if shared_dict then
-- the worker that handles Admin API calls is responsible for writing the shared dict
local ok, err = shared_dict:set("config", raw)
if not ok then
return nil, "failed to save config to shared dict: " .. err
end
core.log.info("standalone config updated: ", raw)
else
core.log.crit(config_yaml.ERR_NO_SHARED_DICT)
end
return events:post(EVENT_UPDATE, EVENT_UPDATE)
end
local function update(ctx)
local content_type = core.request.header(nil, "content-type") or "application/json"
-- read the request body
local req_body, err = core.request.get_body()
if err then
return core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if not req_body or #req_body <= 0 then
return core.response.exit(400, {error_msg = "invalid request body: empty request body"})
end
-- parse the request body
local data
if core.string.has_prefix(content_type, "application/yaml") then
data = yaml.load(req_body, { all = false })
if not data or type(data) ~= "table" then
err = "invalid yaml request body"
end
else
data, err = core.json.decode(req_body)
end
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
req_body = data
local config, err = get_config()
if not config then
if err ~= "not found" then
core.log.error("failed to get config from shared dict: ", err)
return core.response.exit(500, {
error_msg = "failed to get config from shared dict: " .. err
})
end
end
-- check input by jsonschema
local apisix_yaml = {}
local created_objs = config_yaml.fetch_all_created_obj()
for key, obj in pairs(created_objs) do
local conf_version_key = obj.conf_version_key
local conf_version = config and config[conf_version_key] or obj.conf_version
local items = req_body[key]
local new_conf_version = req_body[conf_version_key]
if not new_conf_version then
new_conf_version = conf_version + 1
else
if type(new_conf_version) ~= "number" then
return core.response.exit(400, {
error_msg = conf_version_key .. " must be a number",
})
end
if new_conf_version < conf_version then
return core.response.exit(400, {
error_msg = conf_version_key ..
" must be greater than or equal to (" .. conf_version .. ")",
})
end
end
apisix_yaml[conf_version_key] = new_conf_version
if new_conf_version == conf_version then
apisix_yaml[key] = config and config[key]
elseif items and #items > 0 then
apisix_yaml[key] = table_new(#items, 0)
local item_schema = obj.item_schema
local item_checker = obj.checker
local id_set = {}
for index, item in ipairs(items) do
local item_temp = tbl_deepcopy(item)
local valid, err
-- report a 0-based index in the error message to match the request body
local err_prefix = "invalid " .. key .. " at index " .. (index - 1) .. ", err: "
if item_schema then
valid, err = check_schema(obj.item_schema, item_temp)
if not valid then
core.log.error(err_prefix, err)
core.response.exit(400, {error_msg = err_prefix .. err})
end
end
if item_checker then
local item_checker_key
if item.id then
-- credentials additionally need the full key for the check
item_checker_key = "/" .. key .. "/" .. item_temp.id
end
valid, err = item_checker(item_temp, item_checker_key)
if not valid then
core.log.error(err_prefix, err)
core.response.exit(400, {error_msg = err_prefix .. err})
end
end
-- prevent updating resource with the same ID
-- (e.g., service ID or other resource IDs) in a single request
local duplicated, err = check_duplicate(item, key, id_set)
if duplicated then
core.log.error(err)
core.response.exit(400, { error_msg = err })
end
table_insert(apisix_yaml[key], item)
end
end
end
local ok, err = update_and_broadcast_config(apisix_yaml)
if not ok then
core.response.exit(500, err)
end
return core.response.exit(202)
end
local function get(ctx)
local accept = core.request.header(nil, "accept") or "application/json"
local want_yaml_resp = core.string.has_prefix(accept, "application/yaml")
local config, err = get_config()
if not config then
if err ~= "not found" then
core.log.error("failed to get config from shared dict: ", err)
return core.response.exit(500, {
error_msg = "failed to get config from shared dict: " .. err
})
end
config = {}
local created_objs = config_yaml.fetch_all_created_obj()
for _, obj in pairs(created_objs) do
config[obj.conf_version_key] = obj.conf_version
end
end
local resp, err
if want_yaml_resp then
core.response.set_header("Content-Type", "application/yaml")
resp = yaml.dump({ config })
if not resp then
err = "failed to encode yaml"
end
-- remove the first line "---" and the last line "..."
-- because the yaml.dump() will add them for multiple documents
local m = ngx.re.match(resp, [[^---\s*([\s\S]*?)\s*\.\.\.\s*$]], "jo")
if m and m[1] then
resp = m[1]
end
else
core.response.set_header("Content-Type", "application/json")
resp, err = core.json.encode(config, true)
if not resp then
err = "failed to encode json: " .. err
end
end
if not resp then
return core.response.exit(500, {error_msg = err})
end
return core.response.exit(200, resp)
end
function _M.run()
local ctx = ngx.ctx.api_ctx
local method = str_lower(get_method())
if method == "put" then
return update(ctx)
else
return get(ctx)
end
end
local patch_schema
do
local resource_schema = {
"proto",
"global_rule",
"route",
"service",
"upstream",
"consumer",
"consumer_group",
"credential",
"ssl",
"plugin_config",
}
local function attach_modifiedIndex_schema(name)
local schema = core.schema[name]
if not schema then
core.log.error("schema for ", name, " not found")
return
end
if schema.properties and not schema.properties.modifiedIndex then
schema.properties.modifiedIndex = {
type = "integer",
}
end
end
local function patch_credential_schema()
local credential_schema = core.schema["credential"]
if credential_schema and credential_schema.properties then
credential_schema.properties.id = {
type = "string",
minLength = 15,
maxLength = 128,
pattern = [[^[a-zA-Z0-9-_]+/credentials/[a-zA-Z0-9-_.]+$]],
}
end
end
function patch_schema()
-- attach modifiedIndex schema to all resource schemas
for _, name in ipairs(resource_schema) do
attach_modifiedIndex_schema(name)
end
-- patch credential schema
patch_credential_schema()
end
end
function _M.init_worker()
local function update_config()
local config, err = shared_dict:get("config")
if not config then
core.log.error("failed to get config from shared dict: ", err)
return
end
config, err = core.json.decode(config)
if not config then
core.log.error("failed to decode json: ", err)
return
end
config_yaml._update_config(config)
end
events:register(update_config, EVENT_UPDATE, EVENT_UPDATE)
patch_schema()
end
return _M
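The *_conf_version bookkeeping lets a PUT to /apisix/admin/configs update one resource type while the others keep their stored values. A hedged example payload, shown as a Lua table and assuming the version key for routes is `routes_conf_version`:

{
    routes_conf_version = 2,   -- must be a number >= the stored version
    routes = {
        {
            id = "1",
            uri = "/demo",
            upstream = {
                type = "roundrobin",
                nodes = { ["127.0.0.1:8080"] = 1 },
            },
        },
    },
    -- resource types whose *_conf_version is unchanged keep the
    -- previously stored items
}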

View File

@@ -0,0 +1,81 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local stream_route_checker = require("apisix.stream.router.ip_port").stream_route_checker
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
local service_id = conf.service_id
if service_id then
local key = "/services/" .. service_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "], "
.. "response code: " .. res.status}
end
end
local ok, err = stream_route_checker(conf, true)
if not ok then
return nil, {error_msg = err}
end
return true
end
return resource.new({
name = "stream_routes",
kind = "stream route",
schema = core.schema.stream_route,
checker = check_conf,
unsupported_methods = { "patch" },
list_filter_fields = {
service_id = true,
upstream_id = true,
},
})
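
For orientation, a stream route of the shape validated above might look like the sketch below; the ids are hypothetical and are exactly what triggers the etcd existence lookups in check_conf.

-- hypothetical payload for a stream route create/update request
local stream_route = {
    remote_addr = "127.0.0.1",
    server_port = 9101,
    upstream_id = "1",  -- check_conf verifies /upstreams/1 exists and returns 200
}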

View File

@@ -0,0 +1,134 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_util = require("apisix.core.config_util")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local get_plugin_configs = require("apisix.plugin_config").plugin_configs
local get_consumers = require("apisix.consumer").consumers
local get_consumer_groups = require("apisix.consumer_group").consumer_groups
local get_global_rules = require("apisix.global_rules").global_rules
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id)
local ok, err = apisix_upstream.check_upstream_conf(conf)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function up_id_in_plugins(plugins, up_id)
if plugins and plugins["traffic-split"]
and plugins["traffic-split"].rules then
for _, rule in ipairs(plugins["traffic-split"].rules) do
local plugin_upstreams = rule.weighted_upstreams
for _, plugin_upstream in ipairs(plugin_upstreams) do
if plugin_upstream.upstream_id
and tostring(plugin_upstream.upstream_id) == up_id then
return true
end
end
end
return false
end
end
local function check_resources_reference(resources, up_id,
only_check_plugin, resources_name)
if resources then
for _, resource in config_util.iterate_values(resources) do
if resource and resource.value then
if up_id_in_plugins(resource.value.plugins, up_id) then
return {error_msg = "can not delete this upstream,"
.. " plugin in "
.. resources_name .. " ["
.. resource.value.id
.. "] is still using it now"}
end
if not only_check_plugin and resource.value.upstream_id
and tostring(resource.value.upstream_id) == up_id then
return {error_msg = "can not delete this upstream, "
.. resources_name .. " [" .. resource.value.id
.. "] is still using it now"}
end
end
end
end
end
local function delete_checker(id)
local routes = get_routes()
local err_msg = check_resources_reference(routes, id, false, "route")
if err_msg then
return 400, err_msg
end
local services, services_ver = get_services()
core.log.info("services: ", core.json.delay_encode(services, true))
core.log.info("services_ver: ", services_ver)
local err_msg = check_resources_reference(services, id, false, "service")
if err_msg then
return 400, err_msg
end
local plugin_configs = get_plugin_configs()
local err_msg = check_resources_reference(plugin_configs, id, true, "plugin_config")
if err_msg then
return 400, err_msg
end
local consumers = get_consumers()
local err_msg = check_resources_reference(consumers, id, true, "consumer")
if err_msg then
return 400, err_msg
end
local consumer_groups = get_consumer_groups()
local err_msg = check_resources_reference(consumer_groups, id, true, "consumer_group")
if err_msg then
return 400, err_msg
end
local global_rules = get_global_rules()
err_msg = check_resources_reference(global_rules, id, true, "global_rules")
if err_msg then
return 400, err_msg
end
return nil, nil
end
return resource.new({
name = "upstreams",
kind = "upstream",
schema = core.schema.upstream,
checker = check_conf,
delete_checker = delete_checker
})
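
The deletion guard above also inspects the traffic-split plugin, since an upstream can be referenced there instead of via a top-level upstream_id. A plugins table of the shape up_id_in_plugins walks (ids are hypothetical):

local plugins = {
    ["traffic-split"] = {
        rules = {
            {
                weighted_upstreams = {
                    { upstream_id = "42", weight = 1 },  -- blocks deleting upstream 42
                    { weight = 1 },                      -- default upstream, no id
                },
            },
        },
    },
}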

View File

@@ -0,0 +1,113 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local ngx_time = ngx.time
local tonumber = tonumber
local ipairs = ipairs
local pairs = pairs
local _M = {}
local function inject_timestamp(conf, prev_conf, patch_conf)
if not conf.create_time then
if prev_conf and (prev_conf.node or prev_conf.list).value.create_time then
conf.create_time = (prev_conf.node or prev_conf.list).value.create_time
else
-- As we don't know the existing data's create_time,
-- we have to pretend it was created now.
conf.create_time = ngx_time()
end
end
if not conf.update_time or
-- For PATCH request, the modification is passed as 'patch_conf'
-- If the sub path is used, the 'patch_conf' will be a placeholder `true`
(patch_conf and (patch_conf == true or patch_conf.update_time == nil))
then
-- reset the update_time if:
-- 1. PATCH request, with sub path
-- 2. PATCH request, update_time not given
-- 3. Other request, update_time not given
conf.update_time = ngx_time()
end
end
_M.inject_timestamp = inject_timestamp
function _M.inject_conf_with_prev_conf(kind, key, conf)
local res, err = core.etcd.get(key)
if not res or (res.status ~= 200 and res.status ~= 404) then
core.log.error("failed to get " .. kind .. "[", key, "] from etcd: ", err or res.status)
return nil, err
end
if res.status == 404 then
inject_timestamp(conf)
else
inject_timestamp(conf, res.body)
end
return true
end
-- fix_count makes the "count" field returned by etcd reasonable
function _M.fix_count(body, id)
if body.count then
if not id then
-- remove the count of placeholder (init_dir)
body.count = tonumber(body.count) - 1
else
body.count = tonumber(body.count)
end
end
end
function _M.decrypt_params(decrypt_func, body, schema_type)
-- list
if body.list then
for _, route in ipairs(body.list) do
if route.value and route.value.plugins then
for name, conf in pairs(route.value.plugins) do
decrypt_func(name, conf, schema_type)
end
end
end
return
end
-- node
local plugins = body.node and body.node.value
and body.node.value.plugins
if plugins then
for name, conf in pairs(plugins) do
decrypt_func(name, conf, schema_type)
end
end
-- metadata
if schema_type == core.schema.TYPE_METADATA then
local conf = body.node and body.node.value
decrypt_func(conf.name, conf, schema_type)
end
end
return _M
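
The timestamp rules above boil down to: keep create_time from the previous etcd value when one exists, otherwise stamp now; refresh update_time unless a PATCH body explicitly carried one. A minimal stand-alone rehearsal of that logic, assuming os.time in place of ngx.time:

local function inject_timestamp_demo(conf, prev_create_time, patch_conf)
    conf.create_time = conf.create_time or prev_create_time or os.time()
    -- patch_conf == true is the sub-path placeholder used above
    if not conf.update_time
       or (patch_conf and (patch_conf == true or patch_conf.update_time == nil)) then
        conf.update_time = os.time()
    end
end

local conf = {}
inject_timestamp_demo(conf, 1700000000, true)  -- PATCH with sub path
print(conf.create_time, conf.update_time)      --> 1700000000   <current time>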

View File

@@ -0,0 +1,249 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local pairs = pairs
local tonumber = tonumber
local ngx = ngx
local re_find = ngx.re.find
local fetch_local_conf = require("apisix.core.config_local").local_conf
local try_read_attr = require("apisix.core.table").try_read_attr
local deepcopy = require("apisix.core.table").deepcopy
local log = require("apisix.core.log")
local request = require("apisix.core.request")
local response = require("apisix.core.response")
local table = require("apisix.core.table")
local _M = {}
local admin_api_version
local function enable_v3()
if admin_api_version then
if admin_api_version == "v3" then
return true
end
if admin_api_version == "default" then
return false
end
end
local local_conf, err = fetch_local_conf()
if not local_conf then
admin_api_version = "default"
log.error("failed to fetch local conf: ", err)
return false
end
local api_ver = try_read_attr(local_conf, "deployment", "admin", "admin_api_version")
if api_ver ~= "v3" then
admin_api_version = "default"
return false
end
admin_api_version = api_ver
return true
end
_M.enable_v3 = enable_v3
function _M.to_v3(body, action)
if not enable_v3() then
body.action = action
end
end
function _M.to_v3_list(body)
if not enable_v3() then
return
end
if body.node.dir then
body.list = body.node.nodes
body.node = nil
end
end
local function sort(l, r)
return l.createdIndex < r.createdIndex
end
local function pagination(body, args)
args.page = tonumber(args.page)
args.page_size = tonumber(args.page_size)
if not args.page or not args.page_size then
return
end
if args.page_size < 10 or args.page_size > 500 then
return response.exit(400, "page_size must be between 10 and 500")
end
if not args.page or args.page < 1 then
-- default page is 1
args.page = 1
end
local list = body.list
-- sort nodes by their createdIndex
table.sort(list, sort)
local to = args.page * args.page_size
local from = to - args.page_size + 1
local res = table.new(20, 0)
for i = from, to do
if list[i] then
res[i - from + 1] = list[i]
end
end
body.list = res
end
local function _filter(item, args, resource)
if not args.filter then
return true
end
local filters, err = ngx.decode_args(args.filter or "", 100)
if not filters then
log.error("failed to decode filter args: ", err)
return false
end
for key, value in pairs(filters) do
if not resource.list_filter_fields[key] then
log.warn("filter field '", key, "' is not supported by resource: ", resource.name)
goto CONTINUE
end
if not item[key] then
return false
end
if type(value) == "table" then
value = value[#value] -- get the last value in the table
end
if item[key] ~= value then
return false
end
::CONTINUE::
end
return true
end
local function filter(body, args, resource)
for i = #body.list, 1, -1 do
local name_matched = true
local label_matched = true
local uri_matched = true
if args.name then
name_matched = false
local matched = re_find(body.list[i].value.name, args.name, "jo")
if matched then
name_matched = true
end
end
if args.label then
label_matched = false
if body.list[i].value.labels then
for k, _ in pairs(body.list[i].value.labels) do
if k == args.label then
label_matched = true
break
end
end
end
end
if args.uri then
uri_matched = false
if body.list[i].value.uri then
local matched = re_find(body.list[i].value.uri, args.uri, "jo")
if matched then
uri_matched = true
end
end
if body.list[i].value.uris then
for _, uri in pairs(body.list[i].value.uris) do
if re_find(uri, args.uri, "jo") then
uri_matched = true
break
end
end
end
end
if not name_matched or not label_matched or not uri_matched
or not _filter(body.list[i].value, args, resource) then
table.remove(body.list, i)
end
end
end
function _M.filter(body, resource)
if not enable_v3() then
return body
end
local args = request.get_uri_args()
local processed_body = deepcopy(body)
if processed_body.deleted then
processed_body.node = nil
end
-- strip node wrapping for single query, create, and update scenarios.
if processed_body.node then
processed_body = processed_body.node
end
-- filter and paging logic for list query only
if processed_body.list then
filter(processed_body, args, resource)
-- calculate the total amount of filtered data
processed_body.total = processed_body.list and #processed_body.list or 0
pagination(processed_body, args)
-- remove the count field returned by etcd;
-- a field that merely mirrors the length of the returned list
-- carries no extra information
processed_body.count = nil
end
return processed_body
end
return _M
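
The pagination above is a plain window over the list after sorting by createdIndex: page N with page_size S keeps items (N-1)*S+1 through N*S. Rehearsing just the window arithmetic:

local function page_window(page, page_size)
    local to = page * page_size
    local from = to - page_size + 1
    return from, to
end

print(page_window(1, 10))  --> 1    10
print(page_window(3, 10))  --> 21   30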

View File

@@ -0,0 +1,116 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local plugin_mod = require("apisix.plugin")
local core = require("apisix.core")
local ipairs = ipairs
local ngx_header = ngx.header
local type = type
local _M = {}
local match_opts = {}
local has_route_not_under_apisix
local fetch_api_router
do
local routes = {}
function fetch_api_router()
core.table.clear(routes)
has_route_not_under_apisix = false
for _, plugin in ipairs(plugin_mod.plugins) do
local api_fun = plugin.api
if api_fun then
local api_routes = api_fun()
core.log.debug("fetched api routes: ",
core.json.delay_encode(api_routes, true))
for _, route in ipairs(api_routes) do
if route.uri == nil then
core.log.error("got nil uri in api route: ",
core.json.delay_encode(route, true))
break
end
local typ_uri = type(route.uri)
if not has_route_not_under_apisix then
if typ_uri == "string" then
if not core.string.has_prefix(route.uri, "/apisix/") then
has_route_not_under_apisix = true
end
else
for _, uri in ipairs(route.uri) do
if not core.string.has_prefix(uri, "/apisix/") then
has_route_not_under_apisix = true
break
end
end
end
end
core.table.insert(routes, {
methods = route.methods,
paths = route.uri,
handler = function (api_ctx)
local code, body = route.handler(api_ctx)
if code or body then
if type(body) == "table" and ngx_header["Content-Type"] == nil then
core.response.set_header("Content-Type", "application/json")
end
core.response.exit(code, body)
end
end
})
end
end
end
return router.new(routes)
end
end -- do
function _M.has_route_not_under_apisix()
if has_route_not_under_apisix == nil then
return true
end
return has_route_not_under_apisix
end
function _M.match(api_ctx)
local api_router = core.lrucache.global("api_router", plugin_mod.load_times, fetch_api_router)
if not api_router then
core.log.error("failed to fetch valid api router")
return false
end
core.table.clear(match_opts)
match_opts.method = api_ctx.var.request_method
local ok = api_router:dispatch(api_ctx.var.uri, match_opts, api_ctx)
return ok
end
return _M
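
A plugin feeds this router by exposing an api() function; the records consumed by fetch_api_router look roughly like the sketch below (the URI and handler are hypothetical). Because the URI sits under /apisix/, it would not flip has_route_not_under_apisix.

-- hypothetical plugin.api() return value
local function api()
    return {
        {
            methods = { "GET" },
            uri = "/apisix/plugin/example/hello",
            handler = function (api_ctx)
                return 200, { msg = "hello" }  -- table body gets a JSON Content-Type
            end,
        },
    }
end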

View File

@@ -0,0 +1,400 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local balancer = require("ngx.balancer")
local core = require("apisix.core")
local priority_balancer = require("apisix.balancer.priority")
local apisix_upstream = require("apisix.upstream")
local ipairs = ipairs
local is_http = ngx.config.subsystem == "http"
local enable_keepalive = balancer.enable_keepalive and is_http
local set_more_tries = balancer.set_more_tries
local get_last_failure = balancer.get_last_failure
local set_timeouts = balancer.set_timeouts
local ngx_now = ngx.now
local str_byte = string.byte
local module_name = "balancer"
local pickers = {}
local lrucache_server_picker = core.lrucache.new({
ttl = 300, count = 256
})
local lrucache_addr = core.lrucache.new({
ttl = 300, count = 1024 * 4
})
local _M = {
version = 0.2,
name = module_name,
}
local function transform_node(new_nodes, node)
if not new_nodes._priority_index then
new_nodes._priority_index = {}
end
if not new_nodes[node.priority] then
new_nodes[node.priority] = {}
core.table.insert(new_nodes._priority_index, node.priority)
end
new_nodes[node.priority][node.host .. ":" .. node.port] = node.weight
return new_nodes
end
local function fetch_health_nodes(upstream, checker)
local nodes = upstream.nodes
if not checker then
local new_nodes = core.table.new(0, #nodes)
for _, node in ipairs(nodes) do
new_nodes = transform_node(new_nodes, node)
end
return new_nodes
end
local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
local up_nodes = core.table.new(0, #nodes)
for _, node in ipairs(nodes) do
local ok, err = checker:get_target_status(node.host, port or node.port, host)
if ok then
up_nodes = transform_node(up_nodes, node)
elseif err then
core.log.warn("failed to get health check target status, addr: ",
node.host, ":", port or node.port, ", host: ", host, ", err: ", err)
end
end
if core.table.nkeys(up_nodes) == 0 then
core.log.warn("all upstream nodes is unhealthy, use default")
for _, node in ipairs(nodes) do
up_nodes = transform_node(up_nodes, node)
end
end
return up_nodes
end
local function create_server_picker(upstream, checker)
local picker = pickers[upstream.type]
if not picker then
pickers[upstream.type] = require("apisix.balancer." .. upstream.type)
picker = pickers[upstream.type]
end
if picker then
local nodes = upstream.nodes
local addr_to_domain = {}
for _, node in ipairs(nodes) do
if node.domain then
local addr = node.host .. ":" .. node.port
addr_to_domain[addr] = node.domain
end
end
local up_nodes = fetch_health_nodes(upstream, checker)
if #up_nodes._priority_index > 1 then
core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes))
local server_picker = priority_balancer.new(up_nodes, upstream, picker)
server_picker.addr_to_domain = addr_to_domain
return server_picker
end
core.log.info("upstream nodes: ",
core.json.delay_encode(up_nodes[up_nodes._priority_index[1]]))
local server_picker = picker.new(up_nodes[up_nodes._priority_index[1]], upstream)
server_picker.addr_to_domain = addr_to_domain
return server_picker
end
return nil, "invalid balancer type: " .. upstream.type, 0
end
local function parse_addr(addr)
local host, port, err = core.utils.parse_addr(addr)
return {host = host, port = port}, err
end
-- set_balancer_opts will be called in balancer phase and before any tries
local function set_balancer_opts(route, ctx)
local up_conf = ctx.upstream_conf
-- If the matched route has timeout config, prefer to use the route config.
local timeout = nil
if route and route.value and route.value.timeout then
timeout = route.value.timeout
else
if up_conf.timeout then
timeout = up_conf.timeout
end
end
if timeout then
local ok, err = set_timeouts(timeout.connect, timeout.send,
timeout.read)
if not ok then
core.log.error("could not set upstream timeouts: ", err)
end
end
local retries = up_conf.retries
if not retries or retries < 0 then
retries = #up_conf.nodes - 1
end
if retries > 0 then
if up_conf.retry_timeout and up_conf.retry_timeout > 0 then
ctx.proxy_retry_deadline = ngx_now() + up_conf.retry_timeout
end
local ok, err = set_more_tries(retries)
if not ok then
core.log.error("could not set upstream retries: ", err)
elseif err then
core.log.warn("could not set upstream retries: ", err)
end
end
end
local function parse_server_for_upstream_host(picked_server, upstream_scheme)
local standard_port = apisix_upstream.scheme_to_port[upstream_scheme]
local host = picked_server.domain or picked_server.host
if upstream_scheme and (not standard_port or standard_port ~= picked_server.port) then
host = host .. ":" .. picked_server.port
end
return host
end
-- pick_server will be called:
-- 1. in the access phase so that we can set headers according to the picked server
-- 2. each time we need to retry upstream
local function pick_server(route, ctx)
core.log.info("route: ", core.json.delay_encode(route, true))
core.log.info("ctx: ", core.json.delay_encode(ctx, true))
local up_conf = ctx.upstream_conf
for _, node in ipairs(up_conf.nodes) do
if core.utils.parse_ipv6(node.host) and str_byte(node.host, 1) ~= str_byte("[") then
node.host = '[' .. node.host .. ']'
end
end
local nodes_count = #up_conf.nodes
if nodes_count == 1 then
local node = up_conf.nodes[1]
ctx.balancer_ip = node.host
ctx.balancer_port = node.port
node.upstream_host = parse_server_for_upstream_host(node, ctx.upstream_scheme)
return node
end
local version = ctx.upstream_version
local key = ctx.upstream_key
local checker = ctx.up_checker
ctx.balancer_try_count = (ctx.balancer_try_count or 0) + 1
if ctx.balancer_try_count > 1 then
if ctx.server_picker and ctx.server_picker.after_balance then
ctx.server_picker.after_balance(ctx, true)
end
if checker then
local state, code = get_last_failure()
local host = up_conf.checks and up_conf.checks.active and up_conf.checks.active.host
local port = up_conf.checks and up_conf.checks.active and up_conf.checks.active.port
if state == "failed" then
if code == 504 then
checker:report_timeout(ctx.balancer_ip, port or ctx.balancer_port, host)
else
checker:report_tcp_failure(ctx.balancer_ip, port or ctx.balancer_port, host)
end
else
checker:report_http_status(ctx.balancer_ip, port or ctx.balancer_port, host, code)
end
end
end
if checker then
version = version .. "#" .. checker.status_ver
end
-- the same picker will be used in the whole request, especially during the retry
local server_picker = ctx.server_picker
if not server_picker then
server_picker = lrucache_server_picker(key, version,
create_server_picker, up_conf, checker)
end
if not server_picker then
return nil, "failed to fetch server picker"
end
local server, err = server_picker.get(ctx)
if not server then
err = err or "no valid upstream node"
return nil, "failed to find valid upstream server, " .. err
end
ctx.balancer_server = server
local domain = server_picker.addr_to_domain[server]
local res, err = lrucache_addr(server, nil, parse_addr, server)
if err then
core.log.error("failed to parse server addr: ", server, " err: ", err)
return core.response.exit(502)
end
res.domain = domain
ctx.balancer_ip = res.host
ctx.balancer_port = res.port
ctx.server_picker = server_picker
res.upstream_host = parse_server_for_upstream_host(res, ctx.upstream_scheme)
return res
end
-- for test
_M.pick_server = pick_server
local set_current_peer
do
local pool_opt = {}
local default_keepalive_pool
function set_current_peer(server, ctx)
local up_conf = ctx.upstream_conf
local keepalive_pool = up_conf.keepalive_pool
if enable_keepalive then
if not keepalive_pool then
if not default_keepalive_pool then
local local_conf = core.config.local_conf()
local up_keepalive_conf =
core.table.try_read_attr(local_conf, "nginx_config",
"http", "upstream")
default_keepalive_pool = {}
default_keepalive_pool.idle_timeout =
core.config_util.parse_time_unit(up_keepalive_conf.keepalive_timeout)
default_keepalive_pool.size = up_keepalive_conf.keepalive
default_keepalive_pool.requests = up_keepalive_conf.keepalive_requests
end
keepalive_pool = default_keepalive_pool
end
local idle_timeout = keepalive_pool.idle_timeout
local size = keepalive_pool.size
local requests = keepalive_pool.requests
core.table.clear(pool_opt)
pool_opt.pool_size = size
local scheme = up_conf.scheme
local pool = scheme .. "#" .. server.host .. "#" .. server.port
-- other TLS schemes don't use http balancer keepalive
if (scheme == "https" or scheme == "grpcs") then
local sni = ctx.var.upstream_host
pool = pool .. "#" .. sni
if up_conf.tls and up_conf.tls.client_cert then
pool = pool .. "#" .. up_conf.tls.client_cert
end
end
pool_opt.pool = pool
local ok, err = balancer.set_current_peer(server.host, server.port,
pool_opt)
if not ok then
return ok, err
end
return balancer.enable_keepalive(idle_timeout, requests)
end
return balancer.set_current_peer(server.host, server.port)
end
end
function _M.run(route, ctx, plugin_funcs)
local server, err
if ctx.picked_server then
-- use the server picked in the access phase
server = ctx.picked_server
ctx.picked_server = nil
set_balancer_opts(route, ctx)
else
if ctx.proxy_retry_deadline and ctx.proxy_retry_deadline < ngx_now() then
-- retry count is (try count - 1)
core.log.error("proxy retry timeout, retry count: ", (ctx.balancer_try_count or 1) - 1,
", deadline: ", ctx.proxy_retry_deadline, " now: ", ngx_now())
return core.response.exit(502)
end
-- retry
server, err = pick_server(route, ctx)
if not server then
core.log.error("failed to pick server: ", err)
return core.response.exit(502)
end
local header_changed
local pass_host = ctx.pass_host
if pass_host == "node" then
local host = server.upstream_host
if host ~= ctx.var.upstream_host then
-- retried node has a different host
ctx.var.upstream_host = host
header_changed = true
end
end
local _, run = plugin_funcs("before_proxy")
-- always recreate request as the request may be changed by plugins
if run or header_changed then
balancer.recreate_request()
end
end
core.log.info("proxy request to ", server.host, ":", server.port)
local ok, err = set_current_peer(server, ctx)
if not ok then
core.log.error("failed to set server peer [", server.host, ":",
server.port, "] err: ", err)
return core.response.exit(502)
end
ctx.proxy_passed = true
end
function _M.init_worker()
end
return _M
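
One detail of set_current_peer worth spelling out: keepalive pools are keyed by scheme, host and port, and TLS upstreams additionally by SNI (plus the client certificate under mTLS), so a cached connection is never reused across incompatible handshakes. A stand-alone sketch of the key, mirroring the logic above:

local function pool_key(scheme, host, port, sni, client_cert)
    local pool = scheme .. "#" .. host .. "#" .. port
    if scheme == "https" or scheme == "grpcs" then
        pool = pool .. "#" .. sni
        if client_cert then
            pool = pool .. "#" .. client_cert
        end
    end
    return pool
end

print(pool_key("https", "10.0.0.1", 443, "api.example.com"))
--> https#10.0.0.1#443#api.example.com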

View File

@@ -0,0 +1,154 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resty_chash = require("resty.chash")
local str_char = string.char
local str_gsub = string.gsub
local pairs = pairs
local CONSISTENT_POINTS = 160 -- points per server, taken from `resty.chash`
local _M = {}
local function fetch_chash_hash_key(ctx, upstream)
local key = upstream.key
local hash_on = upstream.hash_on or "vars"
local chash_key
if hash_on == "consumer" then
chash_key = ctx.consumer_name
elseif hash_on == "vars" then
chash_key = ctx.var[key]
elseif hash_on == "header" then
chash_key = ctx.var["http_" .. key]
elseif hash_on == "cookie" then
chash_key = ctx.var["cookie_" .. key]
elseif hash_on == "vars_combinations" then
local err, n_resolved
chash_key, err, n_resolved = core.utils.resolve_var(key, ctx.var)
if err then
core.log.error("could not resolve vars in ", key, " error: ", err)
end
if n_resolved == 0 then
chash_key = nil
end
end
if not chash_key then
chash_key = ctx.var["remote_addr"]
core.log.warn("chash_key fetch is nil, use default chash_key ",
"remote_addr: ", chash_key)
end
core.log.info("upstream key: ", key)
core.log.info("hash_on: ", hash_on)
core.log.info("chash_key: ", core.json.delay_encode(chash_key))
return chash_key
end
function _M.new(up_nodes, upstream)
local str_null = str_char(0)
local nodes_count = 0
local safe_limit = 0
local gcd = 0
local servers, nodes = {}, {}
for serv, weight in pairs(up_nodes) do
if gcd == 0 then
gcd = weight
else
gcd = core.math.gcd(gcd, weight)
end
end
if gcd == 0 then
-- all nodes' weight are 0
gcd = 1
end
for serv, weight in pairs(up_nodes) do
local id = str_gsub(serv, ":", str_null)
nodes_count = nodes_count + 1
weight = weight / gcd
safe_limit = safe_limit + weight
servers[id] = serv
nodes[id] = weight
end
safe_limit = safe_limit * CONSISTENT_POINTS
local picker = resty_chash:new(nodes)
return {
upstream = upstream,
get = function (ctx)
local id
if ctx.balancer_tried_servers then
if ctx.balancer_tried_servers_count == nodes_count then
return nil, "all upstream servers tried"
end
-- 'safe_limit' is a best-effort bound that prevents an infinite loop in case of a bug
for i = 1, safe_limit do
id, ctx.chash_last_server_index = picker:next(ctx.chash_last_server_index)
if not ctx.balancer_tried_servers[servers[id]] then
break
end
end
else
local chash_key = fetch_chash_hash_key(ctx, upstream)
id, ctx.chash_last_server_index = picker:find(chash_key)
end
-- core.log.warn("chash id: ", id, " val: ", servers[id])
return servers[id]
end,
after_balance = function (ctx, before_retry)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M
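
The GCD pass above shrinks node weights before they reach resty.chash, since the ring allocates CONSISTENT_POINTS (160) entries per weight unit; weights {100, 50} reduce to {2, 1}. A stand-alone rehearsal, with a local gcd standing in for core.math.gcd:

local function gcd(a, b) return b == 0 and a or gcd(b, a % b) end

local up_nodes = { ["10.0.0.1:80"] = 100, ["10.0.0.2:80"] = 50 }
local g = 0
for _, weight in pairs(up_nodes) do
    g = g == 0 and weight or gcd(g, weight)
end
for serv, weight in pairs(up_nodes) do
    print(serv, weight / g)  --> 2 and 1 respectively
end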

View File

@@ -0,0 +1,243 @@
-- Original Authors: Shiv Nagarajan & Scott Francis
-- Accessed: March 12, 2018
-- Inspiration drawn from:
-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421
-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
local core = require("apisix.core")
local resty_lock = require("resty.lock")
local nkeys = core.table.nkeys
local table_insert = core.table.insert
local ngx = ngx
local ngx_shared = ngx.shared
local ngx_now = ngx.now
local math = math
local pairs = pairs
local ipairs = ipairs
local next = next
local error = error
local DECAY_TIME = 10 -- this value is in seconds
local LOCK_KEY = ":ewma_key"
local shm_ewma = ngx_shared["balancer-ewma"]
local shm_last_touched_at = ngx_shared["balancer-ewma-last-touched-at"]
local lrucache_addr = core.lrucache.new({ttl = 300, count = 1024})
local lrucache_trans_format = core.lrucache.new({ttl = 300, count = 256})
local ewma_lock, ewma_lock_err = resty_lock:new("balancer-ewma-locks", {timeout = 0, exptime = 0.1})
local _M = {name = "ewma"}
local function lock(upstream)
local _, err = ewma_lock:lock(upstream .. LOCK_KEY)
if err and err ~= "timeout" then
core.log.error("EWMA Balancer failed to lock: ", err)
end
return err
end
local function unlock()
local ok, err = ewma_lock:unlock()
if not ok then
core.log.error("EWMA Balancer failed to unlock: ", err)
end
return err
end
local function decay_ewma(ewma, last_touched_at, rtt, now)
local td = now - last_touched_at
td = math.max(td, 0)
local weight = math.exp(-td / DECAY_TIME)
ewma = ewma * weight + rtt * (1.0 - weight)
return ewma
end
local function store_stats(upstream, ewma, now)
local success, err, forcible = shm_last_touched_at:set(upstream, now)
if not success then
core.log.error("shm_last_touched_at:set failed: ", err)
end
if forcible then
core.log.warn("shm_last_touched_at:set valid items forcibly overwritten")
end
success, err, forcible = shm_ewma:set(upstream, ewma)
if not success then
core.log.error("shm_ewma:set failed: ", err)
end
if forcible then
core.log.warn("shm_ewma:set valid items forcibly overwritten")
end
end
local function get_or_update_ewma(upstream, rtt, update)
if update then
local lock_err = lock(upstream)
if lock_err ~= nil then
return 0, lock_err
end
end
local ewma = shm_ewma:get(upstream) or 0
local now = ngx_now()
local last_touched_at = shm_last_touched_at:get(upstream) or 0
ewma = decay_ewma(ewma, last_touched_at, rtt, now)
if not update then
return ewma, nil
end
store_stats(upstream, ewma, now)
unlock()
return ewma, nil
end
local function get_upstream_name(upstream)
return upstream.host .. ":" .. upstream.port
end
local function score(upstream)
-- Original implementation used names
-- Endpoints don't have names, so passing in IP:Port as key instead
local upstream_name = get_upstream_name(upstream)
return get_or_update_ewma(upstream_name, 0, false)
end
local function parse_addr(addr)
local host, port, err = core.utils.parse_addr(addr)
return {host = host, port = port}, err
end
local function _trans_format(up_nodes)
-- transform
-- {"1.2.3.4:80":100,"5.6.7.8:8080":100}
-- into
-- [{"host":"1.2.3.4","port":"80"},{"host":"5.6.7.8","port":"8080"}]
local peers = {}
local res, err
for addr, _ in pairs(up_nodes) do
res, err = lrucache_addr(addr, nil, parse_addr, addr)
if not err then
core.table.insert(peers, res)
else
core.log.error('parse_addr error: ', addr, err)
end
end
return next(peers) and peers or nil
end
local function _ewma_find(ctx, up_nodes)
local peers
if not up_nodes or nkeys(up_nodes) == 0 then
return nil, 'up_nodes empty'
end
if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nkeys(up_nodes) then
return nil, "all upstream servers tried"
end
peers = lrucache_trans_format(up_nodes, ctx.upstream_version, _trans_format, up_nodes)
if not peers then
return nil, 'up_nodes trans error'
end
local filtered_peers
if ctx.balancer_tried_servers then
for _, peer in ipairs(peers) do
if not ctx.balancer_tried_servers[get_upstream_name(peer)] then
if not filtered_peers then
filtered_peers = {}
end
table_insert(filtered_peers, peer)
end
end
else
filtered_peers = peers
end
local endpoint = filtered_peers[1]
if #filtered_peers > 1 then
local a, b = math.random(1, #filtered_peers), math.random(1, #filtered_peers - 1)
if b >= a then
b = b + 1
end
local backendpoint
endpoint, backendpoint = filtered_peers[a], filtered_peers[b]
if score(endpoint) > score(backendpoint) then
endpoint = backendpoint
end
end
return get_upstream_name(endpoint)
end
local function _ewma_after_balance(ctx, before_retry)
if before_retry then
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
return nil
end
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
local response_time = ctx.var.upstream_response_time or 0
local connect_time = ctx.var.upstream_connect_time or 0
local rtt = connect_time + response_time
local upstream = ctx.var.upstream_addr
if not upstream then
return nil, "no upstream addr found"
end
return get_or_update_ewma(upstream, rtt, true)
end
function _M.new(up_nodes, upstream)
if not shm_ewma or not shm_last_touched_at then
return nil, "dictionary not find"
end
if not ewma_lock then
error(ewma_lock_err)
end
return {
upstream = upstream,
get = function(ctx)
return _ewma_find(ctx, up_nodes)
end,
after_balance = _ewma_after_balance,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M
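
The decay above is a standard exponentially weighted moving average: with td seconds since the last sample, the old score is discounted by exp(-td / DECAY_TIME) and the fresh RTT fills the remainder. A stand-alone rehearsal:

local DECAY_TIME = 10  -- seconds, matching the module above

local function decay_ewma(ewma, last_touched_at, rtt, now)
    local td = math.max(now - last_touched_at, 0)
    local weight = math.exp(-td / DECAY_TIME)
    return ewma * weight + rtt * (1.0 - weight)
end

-- ten seconds after a 0.2s sample, a fresh 0.05s RTT dominates the score
print(decay_ewma(0.2, 0, 0.05, 10))  --> ~0.105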

View File

@@ -0,0 +1,113 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local binaryHeap = require("binaryheap")
local ipairs = ipairs
local pairs = pairs
local _M = {}
local function least_score(a, b)
return a.score < b.score
end
function _M.new(up_nodes, upstream)
local servers_heap = binaryHeap.minUnique(least_score)
for server, weight in pairs(up_nodes) do
local score = 1 / weight
-- Note: the argument order of insert is different from others
servers_heap:insert({
server = server,
effect_weight = 1 / weight,
score = score,
}, server)
end
return {
upstream = upstream,
get = function (ctx)
local server, info, err
if ctx.balancer_tried_servers then
local tried_server_list = {}
while true do
server, info = servers_heap:peek()
-- the retry count must exceed #nodes so this branch can be hit and
-- the request can fall through to the next priority of nodes
if server == nil then
err = "all upstream servers tried"
break
end
if not ctx.balancer_tried_servers[server] then
break
end
servers_heap:pop()
core.table.insert(tried_server_list, info)
end
for _, info in ipairs(tried_server_list) do
servers_heap:insert(info, info.server)
end
else
server, info = servers_heap:peek()
end
if not server then
return nil, err
end
info.score = info.score + info.effect_weight
servers_heap:update(server, info)
return server
end,
after_balance = function (ctx, before_retry)
local server = ctx.balancer_server
local info = servers_heap:valueByPayload(server)
info.score = info.score - info.effect_weight
servers_heap:update(server, info)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[server] = true
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
end,
}
end
return _M
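
The heap above implements weighted least-connections: every server starts at score = 1/weight, each pick adds 1/weight (one more in-flight request), and after_balance subtracts it on completion, so the heap root is always the least loaded server relative to its weight. The arithmetic for two servers:

local score_a, score_b = 1 / 2, 1 / 1  -- weights 2 and 1
score_a = score_a + 1 / 2              -- a picked (0.5 < 1.0), now 1.0
score_a = score_a + 1 / 2              -- suppose a wins the 1.0 tie; now 1.5
print(score_a > score_b)               --> true: b wins the next pick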

View File

@@ -0,0 +1,81 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local ipairs = ipairs
local _M = {}
local function max_priority(a, b)
return a > b
end
function _M.new(up_nodes, upstream, picker_mod)
local priority_index = up_nodes._priority_index
core.table.sort(priority_index, max_priority)
local pickers = core.table.new(#priority_index, 0)
for i, priority in ipairs(priority_index) do
local picker, err = picker_mod.new(up_nodes[priority], upstream)
if not picker then
return nil, "failed to create picker with priority " .. priority .. ": " .. err
end
if not picker.before_retry_next_priority then
return nil, "picker should define 'before_retry_next_priority' to reset ctx"
end
pickers[i] = picker
end
return {
upstream = upstream,
get = function (ctx)
for i = ctx.priority_balancer_picker_idx or 1, #pickers do
local picker = pickers[i]
local server, err = picker.get(ctx)
if server then
ctx.priority_balancer_picker_idx = i
return server
end
core.log.notice("failed to get server from current priority ",
priority_index[i],
", try next one, err: ", err)
picker.before_retry_next_priority(ctx)
end
return nil, "all servers tried"
end,
after_balance = function (ctx, before_retry)
local priority_balancer_picker = pickers[ctx.priority_balancer_picker_idx]
if not priority_balancer_picker or
not priority_balancer_picker.after_balance
then
return
end
priority_balancer_picker.after_balance(ctx, before_retry)
end
}
end
return _M
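
Because _priority_index is sorted descending, picker 1 always holds the highest-priority node group, and lower priorities are only consulted after the current one is exhausted. A one-line rehearsal of the ordering:

local priority_index = { 0, -1, 10 }
table.sort(priority_index, function (a, b) return a > b end)
print(table.concat(priority_index, ", "))  --> 10, 0, -1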

View File

@@ -0,0 +1,89 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local roundrobin = require("resty.roundrobin")
local core = require("apisix.core")
local nkeys = core.table.nkeys
local pairs = pairs
local _M = {}
function _M.new(up_nodes, upstream)
local safe_limit = 0
for _, weight in pairs(up_nodes) do
-- the weight can be zero
safe_limit = safe_limit + weight + 1
end
local picker = roundrobin:new(up_nodes)
local nodes_count = nkeys(up_nodes)
return {
upstream = upstream,
get = function (ctx)
if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nodes_count then
return nil, "all upstream servers tried"
end
local server, err
for i = 1, safe_limit do
server, err = picker:find()
if not server then
return nil, err
end
if ctx.balancer_tried_servers then
if not ctx.balancer_tried_servers[server] then
break
end
else
break
end
end
return server
end,
after_balance = function (ctx, before_retry)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M
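
safe_limit above bounds the skip loop: summing weight + 1 per node guarantees enough picker:find() calls to pass over the whole weighted cycle even when some weights are zero, so the loop terminates instead of spinning once every remaining server has been tried. The computation in isolation:

local up_nodes = { ["10.0.0.1:80"] = 1, ["10.0.0.2:80"] = 0 }  -- zero weight is legal
local safe_limit = 0
for _, weight in pairs(up_nodes) do
    safe_limit = safe_limit + weight + 1
end
print(safe_limit)  --> 3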

View File

@@ -0,0 +1,40 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local pkg_cpath_org = package.cpath
local pkg_path_org = package.path
local _, find_pos_end = string.find(pkg_path_org, ";", -1, true)
if not find_pos_end then
pkg_path_org = pkg_path_org .. ";"
end
local apisix_home = "/usr/local/apisix"
local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;"
local pkg_path_deps = apisix_home .. "/deps/share/lua/5.1/?.lua;"
local pkg_path_env = apisix_home .. "/?.lua;"
-- modify the load path to load our dependencies
package.cpath = pkg_cpath .. pkg_cpath_org
package.path = pkg_path_deps .. pkg_path_org .. pkg_path_env
-- pass path to construct the final result
local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org)
local ops = require("apisix.cli.ops")
ops.execute(env, arg)

View File

@@ -0,0 +1,385 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local table_concat = table.concat
local _M = {
apisix = {
node_listen = { 9080 },
enable_admin = true,
enable_dev_mode = false,
enable_reuseport = true,
show_upstream_status_in_response_header = false,
enable_ipv6 = true,
enable_http2 = true,
enable_server_tokens = true,
extra_lua_path = "",
extra_lua_cpath = "",
proxy_cache = {
cache_ttl = "10s",
zones = {
{
name = "disk_cache_one",
memory_size = "50m",
disk_size = "1G",
disk_path = "/tmp/disk_cache_one",
cache_levels = "1:2"
},
{
name = "memory_cache",
memory_size = "50m"
}
}
},
delete_uri_tail_slash = false,
normalize_uri_like_servlet = false,
router = {
http = "radixtree_host_uri",
ssl = "radixtree_sni"
},
proxy_mode = "http",
resolver_timeout = 5,
enable_resolv_search_opt = true,
ssl = {
enable = true,
listen = { {
port = 9443,
enable_http3 = false
} },
ssl_protocols = "TLSv1.2 TLSv1.3",
ssl_ciphers = table_concat({
"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305",
"DHE-RSA-AES128-GCM-SHA256", "DHE-RSA-AES256-GCM-SHA384",
}, ":"),
ssl_session_tickets = false,
ssl_trusted_certificate = "system"
},
enable_control = true,
disable_sync_configuration_during_start = false,
data_encryption = {
enable_encrypt_fields = true,
keyring = { "qeddd145sfvddff3", "edd1c9f0985e76a2" }
},
events = {
module = "lua-resty-events"
}
},
nginx_config = {
error_log = "logs/error.log",
error_log_level = "warn",
worker_processes = "auto",
enable_cpu_affinity = false,
worker_rlimit_nofile = 20480,
worker_shutdown_timeout = "240s",
max_pending_timers = 16384,
max_running_timers = 4096,
event = {
worker_connections = 10620
},
meta = {
lua_shared_dict = {
["prometheus-metrics"] = "15m",
["standalone-config"] = "10m",
["status-report"] = "1m",
}
},
stream = {
enable_access_log = false,
access_log = "logs/access_stream.log",
-- luacheck: push max code line length 300
access_log_format = "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time",
-- luacheck: pop
access_log_format_escape = "default",
lua_shared_dict = {
["etcd-cluster-health-check-stream"] = "10m",
["lrucache-lock-stream"] = "10m",
["plugin-limit-conn-stream"] = "10m",
["worker-events-stream"] = "10m",
["tars-stream"] = "1m",
["upstream-healthcheck-stream"] = "10m",
}
},
main_configuration_snippet = "",
http_configuration_snippet = "",
http_server_configuration_snippet = "",
http_server_location_configuration_snippet = "",
http_admin_configuration_snippet = "",
http_end_configuration_snippet = "",
stream_configuration_snippet = "",
http = {
enable_access_log = true,
access_log = "logs/access.log",
access_log_buffer = 16384,
-- luacheck: push max code line length 300
access_log_format =
'$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time "$upstream_scheme://$upstream_host$upstream_uri"',
-- luacheck: pop
access_log_format_escape = "default",
keepalive_timeout = "60s",
client_header_timeout = "60s",
client_body_timeout = "60s",
client_max_body_size = 0,
send_timeout = "10s",
underscores_in_headers = "on",
real_ip_header = "X-Real-IP",
real_ip_recursive = "off",
real_ip_from = { "127.0.0.1", "unix:" },
proxy_ssl_server_name = true,
upstream = {
keepalive = 320,
keepalive_requests = 1000,
keepalive_timeout = "60s"
},
charset = "utf-8",
variables_hash_max_size = 2048,
lua_shared_dict = {
["internal-status"] = "10m",
["plugin-limit-req"] = "10m",
["plugin-limit-count"] = "10m",
["prometheus-metrics"] = "10m",
["plugin-limit-conn"] = "10m",
["upstream-healthcheck"] = "10m",
["worker-events"] = "10m",
["lrucache-lock"] = "10m",
["balancer-ewma"] = "10m",
["balancer-ewma-locks"] = "10m",
["balancer-ewma-last-touched-at"] = "10m",
["plugin-limit-req-redis-cluster-slot-lock"] = "1m",
["plugin-limit-count-redis-cluster-slot-lock"] = "1m",
["plugin-limit-conn-redis-cluster-slot-lock"] = "1m",
["plugin-ai-rate-limiting"] = "10m",
["plugin-ai-rate-limiting-reset-header"] = "10m",
tracing_buffer = "10m",
["plugin-api-breaker"] = "10m",
["etcd-cluster-health-check"] = "10m",
discovery = "1m",
jwks = "1m",
introspection = "10m",
["access-tokens"] = "1m",
["ext-plugin"] = "1m",
tars = "1m",
["cas-auth"] = "10m",
["ocsp-stapling"] = "10m",
["mcp-session"] = "10m",
}
}
},
graphql = {
max_size = 1048576
},
plugins = {
"real-ip",
"ai",
"client-control",
"proxy-control",
"request-id",
"zipkin",
"ext-plugin-pre-req",
"fault-injection",
"mocking",
"serverless-pre-function",
"cors",
"ip-restriction",
"ua-restriction",
"referer-restriction",
"csrf",
"uri-blocker",
"request-validation",
"chaitin-waf",
"multi-auth",
"openid-connect",
"cas-auth",
"authz-casbin",
"authz-casdoor",
"wolf-rbac",
"ldap-auth",
"hmac-auth",
"basic-auth",
"jwt-auth",
"jwe-decrypt",
"key-auth",
"consumer-restriction",
"attach-consumer-label",
"forward-auth",
"opa",
"authz-keycloak",
"proxy-cache",
"body-transformer",
"ai-prompt-template",
"ai-prompt-decorator",
"ai-prompt-guard",
"ai-rag",
"ai-rate-limiting",
"ai-proxy-multi",
"ai-proxy",
"ai-aws-content-moderation",
"proxy-mirror",
"proxy-rewrite",
"workflow",
"api-breaker",
"limit-conn",
"limit-count",
"limit-req",
"gzip",
-- deprecated and will be removed in a future release
-- "server-info",
"traffic-split",
"redirect",
"response-rewrite",
"mcp-bridge",
"degraphql",
"kafka-proxy",
"grpc-transcode",
"grpc-web",
"http-dubbo",
"public-api",
"prometheus",
"datadog",
"lago",
"loki-logger",
"elasticsearch-logger",
"echo",
"loggly",
"http-logger",
"splunk-hec-logging",
"skywalking-logger",
"google-cloud-logging",
"sls-logger",
"tcp-logger",
"kafka-logger",
"rocketmq-logger",
"syslog",
"udp-logger",
"file-logger",
"clickhouse-logger",
"tencent-cloud-cls",
"inspect",
"example-plugin",
"aws-lambda",
"azure-functions",
"openwhisk",
"openfunction",
"serverless-post-function",
"ext-plugin-post-req",
"ext-plugin-post-resp",
"ai-request-rewrite",
},
stream_plugins = { "ip-restriction", "limit-conn", "mqtt-proxy", "syslog" },
plugin_attr = {
["log-rotate"] = {
timeout = 10000,
interval = 3600,
max_kept = 168,
max_size = -1,
enable_compression = false
},
skywalking = {
service_name = "APISIX",
service_instance_name = "APISIX Instance Name",
endpoint_addr = "http://127.0.0.1:12800",
report_interval = 3
},
opentelemetry = {
trace_id_source = "x-request-id",
resource = {
["service.name"] = "APISIX"
},
collector = {
address = "127.0.0.1:4318",
request_timeout = 3,
request_headers = {
Authorization = "token"
}
},
batch_span_processor = {
drop_on_queue_full = false,
max_queue_size = 1024,
batch_timeout = 2,
inactive_timeout = 1,
max_export_batch_size = tonumber(os.getenv("OTEL_BSP_MAX_EXPORT_BATCH_SIZE")) or 16
},
set_ngx_var = false
},
prometheus = {
export_uri = "/apisix/prometheus/metrics",
metric_prefix = "apisix_",
enable_export_server = true,
export_addr = {
ip = "127.0.0.1",
port = 9091
}
},
["server-info"] = {
report_ttl = 60
},
["dubbo-proxy"] = {
upstream_multiplex_count = 32
},
["proxy-mirror"] = {
timeout = {
connect = "60s",
read = "60s",
send = "60s"
}
},
inspect = {
delay = 3,
hooks_file = "/usr/local/apisix/plugin_inspect_hooks.lua"
},
zipkin = {
set_ngx_var = false
}
},
deployment = {
role = "traditional",
role_traditional = {
config_provider = "etcd"
},
admin = {
admin_key_required = true,
admin_key = {
{
name = "admin",
key = "",
role = "admin"
}
},
enable_admin_cors = true,
enable_admin_ui = true,
allow_admin = { "127.0.0.0/24" },
admin_listen = {
ip = "0.0.0.0",
port = 9180
},
admin_api_version = "v3"
},
etcd = {
host = { "http://127.0.0.1:2379" },
prefix = "/apisix",
timeout = 30,
watch_timeout = 50,
startup_retry = 2,
tls = {
verify = true
}
}
}
}
return _M
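
These defaults are what try_read_attr resolves against when the user's config.yaml does not override them; note that admin_api_version defaults to "v3", which is what enables the v3 response shaping shown earlier. Below is a hypothetical reimplementation of the nested read, for illustration only (the real helper lives in apisix.core.table):

local function try_read_attr(tbl, ...)
    for _, key in ipairs({ ... }) do
        if type(tbl) ~= "table" then
            return nil
        end
        tbl = tbl[key]
    end
    return tbl
end

local conf = { deployment = { admin = { admin_api_version = "v3" } } }
print(try_read_attr(conf, "deployment", "admin", "admin_api_version"))  --> v3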

View File

@@ -0,0 +1,115 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local util = require("apisix.cli.util")
local pcall = pcall
local error = error
local exit = os.exit
local stderr = io.stderr
local str_find = string.find
local arg = arg
local package = package
local tonumber = tonumber
return function (apisix_home, pkg_cpath_org, pkg_path_org)
-- ulimit setting should be checked when APISIX starts
local res, err = util.execute_cmd("ulimit -n")
if not res then
error("failed to exec ulimit cmd \'ulimit -n \', err: " .. err)
end
local trimmed_res = util.trim(res)
local ulimit = trimmed_res == "unlimited" and trimmed_res or tonumber(trimmed_res)
if not ulimit then
error("failed to fetch current maximum number of open file descriptors")
end
-- developer-only: use the current folder as the working space
local is_root_path = false
local script_path = arg[0]
if script_path:sub(1, 2) == './' then
apisix_home = util.trim(util.execute_cmd("pwd"))
if not apisix_home then
error("failed to fetch current path")
end
-- determine whether the current path is under the "/root" folder,
-- using the "/root/" prefix as the marker.
if str_find(apisix_home .. "/", '/root/', nil, true) == 1 then
is_root_path = true
end
local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;"
local pkg_path = apisix_home .. "/?/init.lua;"
.. apisix_home .. "/deps/share/lua/5.1/?/init.lua;"
.. apisix_home .. "/deps/share/lua/5.1/?.lua;;"
package.cpath = pkg_cpath .. package.cpath
package.path = pkg_path .. package.path
end
do
-- skip luajit environment
local ok = pcall(require, "table.new")
if not ok then
local ok, json = pcall(require, "cjson")
if ok and json then
stderr:write("please remove the cjson library in Lua, it may "
.. "conflict with the cjson library in openresty. "
.. "\n luarocks remove lua-cjson\n")
exit(1)
end
end
end
-- pre-transform openresty path
res, err = util.execute_cmd("command -v openresty")
if not res then
error("failed to exec cmd \'command -v openresty\', err: " .. err)
end
local openresty_path_abs = util.trim(res)
local openresty_args = openresty_path_abs .. [[ -p ]] .. apisix_home .. [[ -c ]]
.. apisix_home .. [[/conf/nginx.conf]]
local or_info, err = util.execute_cmd("openresty -V 2>&1")
if not or_info then
error("failed to exec cmd \'openresty -V 2>&1\', err: " .. err)
end
local use_apisix_base = true
if not or_info:find("apisix-nginx-module", 1, true) then
use_apisix_base = false
end
local min_etcd_version = "3.4.0"
return {
apisix_home = apisix_home,
is_root_path = is_root_path,
openresty_args = openresty_args,
openresty_info = or_info,
use_apisix_base = use_apisix_base,
pkg_cpath_org = pkg_cpath_org,
pkg_path_org = pkg_path_org,
min_etcd_version = min_etcd_version,
ulimit = ulimit,
}
end
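
This file returns a constructor rather than a module table; a minimal sketch of invoking it, assuming the file lives at apisix/cli/env.lua and openresty is installed on the machine:

local build_env = require("apisix.cli.env")
local env = build_env("/usr/local/apisix", package.cpath, package.path)
print(env.min_etcd_version)  --> 3.4.0
print(env.use_apisix_base)   -- true when `openresty -V` reports apisix-nginx-module
print(env.openresty_args)    -- openresty -p /usr/local/apisix -c /usr/local/apisix/conf/nginx.conf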


@@ -0,0 +1,405 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local base64_encode = require("base64").encode
local dkjson = require("dkjson")
local constants = require("apisix.constants")
local util = require("apisix.cli.util")
local file = require("apisix.cli.file")
local http = require("socket.http")
local https = require("ssl.https")
local ltn12 = require("ltn12")
local type = type
local ipairs = ipairs
local pairs = pairs
local print = print
local tonumber = tonumber
local str_format = string.format
local str_sub = string.sub
local table_concat = table.concat
local table_insert = table.insert
local io_stderr = io.stderr
local _M = {}
-- Timeout for all I/O operations
http.TIMEOUT = 3
local function parse_semantic_version(ver)
local errmsg = "invalid semantic version: " .. ver
local parts = util.split(ver, "-")
if #parts > 2 then
return nil, errmsg
end
if #parts == 2 then
ver = parts[1]
end
local fields = util.split(ver, ".")
if #fields ~= 3 then
return nil, errmsg
end
local major = tonumber(fields[1])
local minor = tonumber(fields[2])
local patch = tonumber(fields[3])
if not (major and minor and patch) then
return nil, errmsg
end
return {
major = major,
minor = minor,
patch = patch,
}
end
local function compare_semantic_version(v1, v2)
local ver1, err = parse_semantic_version(v1)
if not ver1 then
return nil, err
end
local ver2, err = parse_semantic_version(v2)
if not ver2 then
return nil, err
end
if ver1.major ~= ver2.major then
return ver1.major < ver2.major
end
if ver1.minor ~= ver2.minor then
return ver1.minor < ver2.minor
end
return ver1.patch < ver2.patch
end
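-- e.g. compare_semantic_version("3.3.9", "3.4.0") --> true (3.3.9 is older);
-- parse_semantic_version("3.5.1-rc1") drops the "-rc1" pre-release part and
-- keeps only the numeric major.minor.patch fields.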
local function request(url, yaml_conf)
local response_body = {}
local single_request = false
if type(url) == "string" then
url = {
url = url,
method = "GET",
sink = ltn12.sink.table(response_body),
}
single_request = true
end
local res, code
if str_sub(url.url, 1, 8) == "https://" then
local verify = "peer"
if yaml_conf.etcd.tls then
local cfg = yaml_conf.etcd.tls
if cfg.verify == false then
verify = "none"
end
url.certificate = cfg.cert
url.key = cfg.key
local apisix_ssl = yaml_conf.apisix.ssl
if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
url.cafile = apisix_ssl.ssl_trusted_certificate
end
end
url.verify = verify
res, code = https.request(url)
else
res, code = http.request(url)
end
-- In case of failure, request returns nil followed by an error message;
-- otherwise the first return value is the response body,
-- followed by the response status code.
if single_request and res ~= nil then
return table_concat(response_body), code
end
return res, code
end
local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
local is_success = true
local errmsg
local auth_token
local user = yaml_conf.etcd.user
local password = yaml_conf.etcd.password
if user and password then
local auth_url = host .. "/v3/auth/authenticate"
local json_auth = {
name = user,
password = password
}
local post_json_auth = dkjson.encode(json_auth)
local response_body = {}
local res, err
local retry_time = 0
while retry_time < 2 do
res, err = request({
url = auth_url,
method = "POST",
source = ltn12.source.string(post_json_auth),
sink = ltn12.sink.table(response_body),
headers = {
["Content-Length"] = #post_json_auth
}
}, yaml_conf)
-- In case of failure, request returns nil followed by an error message;
-- otherwise the first return value is just the number 1,
-- followed by the response status code.
if res then
break
end
retry_time = retry_time + 1
print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
auth_url, err, retry_time))
end
if not res then
errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", auth_url, err)
util.die(errmsg)
end
local res_auth = table_concat(response_body)
local body_auth, _, err_auth = dkjson.decode(res_auth)
if err_auth or (body_auth and not body_auth["token"]) then
errmsg = str_format("got malformed auth message: \"%s\" from etcd \"%s\"\n",
res_auth, auth_url)
util.die(errmsg)
end
auth_token = body_auth.token
end
local dirs = {}
for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
dirs[name] = true
end
for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
dirs[name] = true
end
for dir_name in pairs(dirs) do
local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
local put_url = host .. "/v3/kv/put"
local post_json = '{"value":"' .. base64_encode("init_dir")
.. '", "key":"' .. base64_encode(key) .. '"}'
local response_body = {}
local headers = {["Content-Length"] = #post_json}
if auth_token then
headers["Authorization"] = auth_token
end
local res, err
local retry_time = 0
while retry_time < 2 do
res, err = request({
url = put_url,
method = "POST",
source = ltn12.source.string(post_json),
sink = ltn12.sink.table(response_body),
headers = headers
}, yaml_conf)
retry_time = retry_time + 1
if res then
break
end
print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
put_url, err, retry_time))
end
if not res then
errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", put_url, err)
util.die(errmsg)
end
local res_put = table_concat(response_body)
if res_put:find("404 page not found", 1, true) then
errmsg = str_format("gRPC gateway is not enabled in etcd cluster \"%s\",",
"which is required by Apache APISIX\n")
util.die(errmsg)
end
if res_put:find("CommonName of client sending a request against gateway", 1, true) then
errmsg = str_format("etcd \"client-cert-auth\" cannot be used with gRPC-gateway, "
.. "please configure the etcd username and password "
.. "in configuration file\n")
util.die(errmsg)
end
if res_put:find("error", 1, true) then
is_success = false
if (index == host_count) then
errmsg = str_format("got malformed key-put message: \"%s\" from etcd \"%s\"\n",
res_put, put_url)
util.die(errmsg)
end
break
end
if args and args["verbose"] then
print(res_put)
end
end
return is_success
end
local function prepare_dirs(yaml_conf, args, index, host, host_count)
return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
function _M.init(env, args)
-- read_yaml_conf
local yaml_conf, err = file.read_yaml_conf(env.apisix_home)
if not yaml_conf then
util.die("failed to read local yaml config of apisix: ", err)
end
if not yaml_conf.apisix then
util.die("failed to read `apisix` field from yaml file when init etcd")
end
if yaml_conf.deployment.config_provider ~= "etcd" then
return true
end
if not yaml_conf.etcd then
util.die("failed to read `etcd` field from yaml file when init etcd")
end
-- convert old single etcd config to multiple etcd config
if type(yaml_conf.etcd.host) == "string" then
yaml_conf.etcd.host = {yaml_conf.etcd.host}
end
local host_count = #(yaml_conf.etcd.host)
local scheme
for i = 1, host_count do
local host = yaml_conf.etcd.host[i]
local fields = util.split(host, "://")
if not fields then
util.die("malformed etcd endpoint: ", host, "\n")
end
if not scheme then
scheme = fields[1]
elseif scheme ~= fields[1] then
print([[WARNING: mixed protocols among etcd endpoints]])
end
end
-- check the etcd cluster version
local etcd_healthy_hosts = {}
for index, host in ipairs(yaml_conf.etcd.host) do
local version_url = host .. "/version"
local errmsg
local res, err
local retry_time = 0
local etcd = yaml_conf.etcd
local max_retry = tonumber(etcd.startup_retry) or 2
while retry_time < max_retry do
res, err = request(version_url, yaml_conf)
-- In case of failure, request returns nil followed by an error message;
-- otherwise the first return value is the response body,
-- followed by the response status code.
if res then
break
end
retry_time = retry_time + 1
print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
version_url, err, retry_time))
end
if res then
local body, _, err = dkjson.decode(res)
if err or (body and not body["etcdcluster"]) then
errmsg = str_format("got malformed version message: \"%s\" from etcd \"%s\"\n", res,
version_url)
util.die(errmsg)
end
local cluster_version = body["etcdcluster"]
if compare_semantic_version(cluster_version, env.min_etcd_version) then
util.die("etcd cluster version ", cluster_version,
" is less than the required version ", env.min_etcd_version,
", please upgrade your etcd cluster\n")
end
table_insert(etcd_healthy_hosts, host)
else
io_stderr:write(str_format("request etcd endpoint \'%s\' error, %s\n", version_url,
err))
end
end
if #etcd_healthy_hosts <= 0 then
util.die("all etcd nodes are unavailable\n")
end
if (#etcd_healthy_hosts / host_count * 100) <= 50 then
util.die("the etcd cluster needs at least 50% and above healthy nodes\n")
end
-- access from the data plane to etcd should be read-only.
-- data plane writes to etcd may cause security issues.
if yaml_conf.deployment.role == "data_plane" then
print("access from the data plane to etcd should be read-only, "
.."skip initializing the data of etcd")
return true
end
print("trying to initialize the data of etcd")
local etcd_ok = false
for index, host in ipairs(etcd_healthy_hosts) do
if prepare_dirs(yaml_conf, args, index, host, host_count) then
etcd_ok = true
break
end
end
if not etcd_ok then
util.die("none of the configured etcd works well\n")
end
end
return _M
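
For context, the directory initialization above goes through etcd's gRPC gateway, which expects base64-encoded keys and values. A minimal sketch of the request body built for one directory ("/routes" is one of the constants.HTTP_ETCD_DIRECTORY entries; the "/apisix" prefix is the default):

local base64_encode = require("base64").encode
local key = "/apisix" .. "/routes" .. "/"
-- this is what prepare_dirs_via_http POSTs to <host>/v3/kv/put:
local post_json = '{"value":"' .. base64_encode("init_dir")
.. '", "key":"' .. base64_encode(key) .. '"}'
print(post_json)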


@@ -0,0 +1,343 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local yaml = require("lyaml")
local profile = require("apisix.core.profile")
local util = require("apisix.cli.util")
local schema = require("apisix.cli.schema")
local default_conf = require("apisix.cli.config")
local dkjson = require("dkjson")
local pl_path = require("pl.path")
local pairs = pairs
local type = type
local tonumber = tonumber
local getenv = os.getenv
local str_gmatch = string.gmatch
local str_find = string.find
local str_sub = string.sub
local print = print
local _M = {}
local exported_vars
function _M.get_exported_vars()
return exported_vars
end
local function is_empty_yaml_line(line)
return line == '' or str_find(line, '^%s*$') or str_find(line, '^%s*#')
end
local function tab_is_array(t)
local count = 0
for k, v in pairs(t) do
count = count + 1
end
return #t == count
end
local function var_sub(val)
local err
local var_used = false
-- we use '${{var}}' because '$var' and '${var}' are taken
-- by Nginx
local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
local i, j = var:find("%:%=")
local default
if i and j then
default = var:sub(i + 2, #var)
default = default:gsub('^%s*(.-)%s*$', '%1')
var = var:sub(1, i - 1)
end
local v = getenv(var) or default
if v then
if not exported_vars then
exported_vars = {}
end
exported_vars[var] = v
var_used = true
return v
end
err = "failed to handle configuration: " ..
"can't find environment variable " .. var
return ""
end)
return new_val, var_used, err
end
local function resolve_conf_var(conf)
local new_keys = {}
for key, val in pairs(conf) do
-- avoid re-iterating the table for already iterated key
if new_keys[key] then
goto continue
end
-- substitute environment variables from conf keys
if type(key) == "string" then
local new_key, _, err = var_sub(key)
if err then
return nil, err
end
if new_key ~= key then
new_keys[new_key] = "dummy" -- we only care about checking the key
conf[key] = nil
conf[new_key] = val
key = new_key
end
end
if type(val) == "table" then
local ok, err = resolve_conf_var(val)
if not ok then
return nil, err
end
elseif type(val) == "string" then
local new_val, var_used, err = var_sub(val)
if err then
return nil, err
end
if var_used then
if tonumber(new_val) ~= nil then
new_val = tonumber(new_val)
elseif new_val == "true" then
new_val = true
elseif new_val == "false" then
new_val = false
end
end
conf[key] = new_val
end
::continue::
end
return true
end
_M.resolve_conf_var = resolve_conf_var
local function replace_by_reserved_env_vars(conf)
-- TODO: support more reserved environment variables
local v = getenv("APISIX_DEPLOYMENT_ETCD_HOST")
if v and conf["deployment"] and conf["deployment"]["etcd"] then
local val, _, err = dkjson.decode(v)
if err or not val then
print("parse ${APISIX_DEPLOYMENT_ETCD_HOST} failed, error:", err)
return
end
conf["deployment"]["etcd"]["host"] = val
end
end
local function path_is_multi_type(path, type_val)
if str_sub(path, 1, 14) == "nginx_config->" and
(type_val == "number" or type_val == "string") then
return true
end
if path == "apisix->node_listen" and type_val == "number" then
return true
end
if path == "apisix->data_encryption->keyring" then
return true
end
return false
end
local function merge_conf(base, new_tab, ppath)
ppath = ppath or ""
for key, val in pairs(new_tab) do
if type(val) == "table" then
if val == yaml.null then
base[key] = nil
elseif tab_is_array(val) then
base[key] = val
else
if base[key] == nil then
base[key] = {}
end
local ok, err = merge_conf(
base[key],
val,
ppath == "" and key or ppath .. "->" .. key
)
if not ok then
return nil, err
end
end
else
local type_val = type(val)
if base[key] == nil then
base[key] = val
elseif type(base[key]) ~= type_val then
local path = ppath == "" and key or ppath .. "->" .. key
if path_is_multi_type(path, type_val) then
base[key] = val
else
return nil, "failed to merge, path[" .. path .. "] expect: " ..
type(base[key]) .. ", but got: " .. type_val
end
else
base[key] = val
end
end
end
return base
end
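-- e.g. merging the user table {etcd = {timeout = 5}} over the defaults keeps every
-- other default field: hash tables are merged key by key, while arrays (per
-- tab_is_array) and scalars replace the default, and a yaml.null value deletes it.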
function _M.read_yaml_conf(apisix_home)
if apisix_home then
profile.apisix_home = apisix_home .. "/"
end
local local_conf_path = profile:customized_yaml_path()
if not local_conf_path then
local_conf_path = profile:yaml_path("config")
end
local user_conf_yaml, err = util.read_file(local_conf_path)
if not user_conf_yaml then
return nil, err
end
local is_empty_file = true
for line in str_gmatch(user_conf_yaml .. '\n', '(.-)\r?\n') do
if not is_empty_yaml_line(line) then
is_empty_file = false
break
end
end
if not is_empty_file then
local user_conf = yaml.load(user_conf_yaml)
if not user_conf then
return nil, "invalid config.yaml file"
end
local ok, err = resolve_conf_var(user_conf)
if not ok then
return nil, err
end
ok, err = merge_conf(default_conf, user_conf)
if not ok then
return nil, err
end
end
-- fill the default value by the schema
local ok, err = schema.validate(default_conf)
if not ok then
return nil, err
end
if default_conf.deployment then
default_conf.deployment.config_provider = "etcd"
if default_conf.deployment.role == "traditional" then
default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_traditional.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
end
elseif default_conf.deployment.role == "control_plane" then
default_conf.etcd = default_conf.deployment.etcd
default_conf.apisix.enable_admin = true
elseif default_conf.deployment.role == "data_plane" then
default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_data_plane.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
elseif default_conf.deployment.role_data_plane.config_provider == "json" then
default_conf.deployment.config_provider = "json"
elseif default_conf.deployment.role_data_plane.config_provider == "xds" then
default_conf.deployment.config_provider = "xds"
end
default_conf.apisix.enable_admin = false
end
end
--- use `not ngx` to check whether the current execution environment is the apisix CLI,
--- because `apisix.yaml` only needs to be parsed and validated in the apisix CLI.
if default_conf.deployment.config_provider == "yaml" and not ngx then
local apisix_conf_path = profile:yaml_path("apisix")
local apisix_conf_yaml, _ = util.read_file(apisix_conf_path)
if apisix_conf_yaml then
local apisix_conf = yaml.load(apisix_conf_yaml)
if apisix_conf then
local ok, err = resolve_conf_var(apisix_conf)
if not ok then
return nil, err
end
end
end
end
local apisix_ssl = default_conf.apisix.ssl
if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
-- default value is set to "system" during schema validation
if apisix_ssl.ssl_trusted_certificate == "system" then
local trusted_certs_path, err = util.get_system_trusted_certs_filepath()
if not trusted_certs_path then
util.die(err)
end
apisix_ssl.ssl_trusted_certificate = trusted_certs_path
else
-- During validation, the path is relative to PWD
-- When Nginx starts, the path is relative to conf
-- Therefore we need to check the absolute version instead
local cert_path = pl_path.abspath(apisix_ssl.ssl_trusted_certificate)
if not pl_path.exists(cert_path) then
util.die("certificate path", cert_path, "doesn't exist\n")
end
apisix_ssl.ssl_trusted_certificate = cert_path
end
end
replace_by_reserved_env_vars(default_conf)
return default_conf
end
return _M
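
The ${{VAR}} / ${{VAR:=default}} substitution implemented above can be exercised on its own; a minimal sketch (apisix.cli.file is the require path used elsewhere in this commit):

local file = require("apisix.cli.file")
local conf = {
    etcd = { host = "${{ETCD_HOST:=http://127.0.0.1:2379}}" },
    timeout = "${{APISIX_TIMEOUT:=30}}",
}
assert(file.resolve_conf_var(conf))
-- with both variables unset, the defaults after ":=" apply, and substituted
-- values are coerced: conf.timeout becomes the number 30, not the string "30".
print(conf.etcd.host, conf.timeout)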


@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- IP match and verify module.
--
-- @module cli.ip
local mediador_ip = require("resty.mediador.ip")
local setmetatable = setmetatable
local _M = {}
local mt = { __index = _M }
---
-- Create an instance of module cli.ip.
--
-- @function cli.ip:new
-- @tparam string ip IP or CIDR.
-- @treturn instance of the module if the given ip is valid; nil and an error message otherwise.
function _M.new(self, ip)
if not mediador_ip.valid(ip) then
return nil, "invalid ip"
end
local _ip = mediador_ip.parse(ip)
return setmetatable({ _ip = _ip }, mt)
end
---
-- Is the given ip a loopback address?
--
-- @function cli.ip:is_loopback
-- @treturn boolean True if the given ip is a loopback address, false otherwise.
function _M.is_loopback(self)
return self._ip and "loopback" == self._ip:range()
end
---
-- Is the given ip the unspecified address?
--
-- @function cli.ip:is_unspecified
-- @treturn boolean True if the given ip is the unspecified address, false otherwise.
function _M.is_unspecified(self)
return self._ip and "unspecified" == self._ip:range()
end
return _M
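
A short usage sketch of this helper (require path assumed from the @module annotation):

local cli_ip = require("apisix.cli.ip")
local ip, err = cli_ip:new("127.0.0.1")
assert(ip, err)
print(ip:is_loopback())        --> true
print(ip:is_unspecified())     --> false
print(cli_ip:new("not-an-ip")) --> nil  "invalid ip"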


@@ -0,0 +1,998 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return [=[
# Configuration File - Nginx Server Configs
# This is a read-only file, do not try to modify it.
{% if user and user ~= '' then %}
user {* user *};
{% end %}
master_process on;
worker_processes {* worker_processes *};
{% if os_name == "Linux" and enable_cpu_affinity == true then %}
worker_cpu_affinity auto;
{% end %}
# main configuration snippet starts
{% if main_configuration_snippet then %}
{* main_configuration_snippet *}
{% end %}
# main configuration snippet ends
error_log {* error_log *} {* error_log_level or "warn" *};
pid logs/nginx.pid;
worker_rlimit_nofile {* worker_rlimit_nofile *};
events {
accept_mutex off;
worker_connections {* event.worker_connections *};
}
worker_rlimit_core {* worker_rlimit_core *};
worker_shutdown_timeout {* worker_shutdown_timeout *};
env APISIX_PROFILE;
env PATH; # for searching external plugin runner's binary
# reserved environment variables for configuration
env APISIX_DEPLOYMENT_ETCD_HOST;
{% if envs then %}
{% for _, name in ipairs(envs) do %}
env {*name*};
{% end %}
{% end %}
{% if use_apisix_base then %}
thread_pool grpc-client-nginx-module threads=1;
lua {
{% if enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* meta.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if standalone_with_admin_api then %}
lua_shared_dict standalone-config {* meta.lua_shared_dict["standalone-config"] *};
{% end %}
{% if status then %}
lua_shared_dict status-report {* meta.lua_shared_dict["status-report"] *};
{% end %}
lua_shared_dict nacos 10m;
}
{% if enabled_stream_plugins["prometheus"] and not enable_http then %}
http {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if enabled_stream_plugins["prometheus"] then %}
init_by_lua_block {
require "resty.core"
local process = require("ngx.process")
local ok, err = process.enable_privileged_agent()
if not ok then
ngx.log(ngx.ERR, "failed to enable privileged_agent: ", err)
end
}
init_worker_by_lua_block {
require("apisix.plugins.prometheus.exporter").http_init(true)
}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics(true)
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
}
{% end %}
{% end %}
{% if enable_stream then %}
stream {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
lua_socket_log_errors off;
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict lrucache-lock-stream {* stream.lua_shared_dict["lrucache-lock-stream"] *};
lua_shared_dict etcd-cluster-health-check-stream {* stream.lua_shared_dict["etcd-cluster-health-check-stream"] *};
lua_shared_dict worker-events-stream {* stream.lua_shared_dict["worker-events-stream"] *};
{% if stream.lua_shared_dict["upstream-healthcheck-stream"] then %}
lua_shared_dict upstream-healthcheck-stream {* stream.lua_shared_dict["upstream-healthcheck-stream"] *};
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars-stream {* stream.lua_shared_dict["tars-stream"] *};
{% end %}
{% if enabled_stream_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *};
{% end %}
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*}-stream {*size*};
{% end %}
{% end %}
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# for stream logs, off by default
{% if stream.enable_access_log == true then %}
log_format main escape={* stream.access_log_format_escape *} '{* stream.access_log_format *}';
access_log {* stream.access_log *} main buffer=16384 flush=3;
{% end %}
# stream configuration snippet starts
{% if stream_configuration_snippet then %}
{* stream_configuration_snippet *}
{% end %}
# stream configuration snippet ends
upstream apisix_backend {
server 127.0.0.1:80;
balancer_by_lua_block {
apisix.stream_balancer_phase()
}
}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.stream_init(args)
}
init_worker_by_lua_block {
apisix.stream_init_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/stream_worker_events.sock;
access_log off;
content_by_lua_block {
require("resty.events.compat").run()
}
}
{% end %}
server {
{% for _, item in ipairs(stream_proxy.tcp or {}) do %}
listen {*item.addr*} {% if item.tls then %} ssl {% end %} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %};
{% end %}
{% for _, addr in ipairs(stream_proxy.udp or {}) do %}
listen {*addr*} udp {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if tcp_enable_ssl then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if proxy_protocol and proxy_protocol.enable_tcp_pp_to_upstream then %}
proxy_protocol on;
{% end %}
preread_by_lua_block {
apisix.stream_preread_phase()
}
proxy_pass apisix_backend;
{% if use_apisix_base then %}
set $upstream_sni "apisix_backend";
proxy_ssl_server_name on;
proxy_ssl_name $upstream_sni;
{% end %}
log_by_lua_block {
apisix.stream_log_phase()
}
}
}
{% end %}
{% if enable_http then %}
http {
# put extra_lua_path in front of the builtin path
# so user can override the source code
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict internal-status {* http.lua_shared_dict["internal-status"] *};
lua_shared_dict upstream-healthcheck {* http.lua_shared_dict["upstream-healthcheck"] *};
lua_shared_dict worker-events {* http.lua_shared_dict["worker-events"] *};
lua_shared_dict lrucache-lock {* http.lua_shared_dict["lrucache-lock"] *};
lua_shared_dict balancer-ewma {* http.lua_shared_dict["balancer-ewma"] *};
lua_shared_dict balancer-ewma-locks {* http.lua_shared_dict["balancer-ewma-locks"] *};
lua_shared_dict balancer-ewma-last-touched-at {* http.lua_shared_dict["balancer-ewma-last-touched-at"] *};
lua_shared_dict etcd-cluster-health-check {* http.lua_shared_dict["etcd-cluster-health-check"] *}; # etcd health check
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*} {*size*};
{% end %}
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars {* http.lua_shared_dict["tars"] *};
{% end %}
{% if http.lua_shared_dict["plugin-ai-rate-limiting"] then %}
lua_shared_dict plugin-ai-rate-limiting {* http.lua_shared_dict["plugin-ai-rate-limiting"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting 10m;
{% end %}
{% if http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] then %}
lua_shared_dict plugin-ai-rate-limiting-reset-header {* http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting-reset-header 10m;
{% end %}
{% if enabled_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn {* http.lua_shared_dict["plugin-limit-conn"] *};
lua_shared_dict plugin-limit-conn-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-conn-redis-cluster-slot-lock"] *};
{% end %}
{% if enabled_plugins["limit-req"] then %}
lua_shared_dict plugin-limit-req-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-req-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-req {* http.lua_shared_dict["plugin-limit-req"] *};
{% end %}
{% if enabled_plugins["limit-count"] then %}
lua_shared_dict plugin-limit-count {* http.lua_shared_dict["plugin-limit-count"] *};
lua_shared_dict plugin-limit-count-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-count-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-count-reset-header {* http.lua_shared_dict["plugin-limit-count"] *};
{% end %}
{% if enabled_plugins["prometheus"] and not enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* http.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if enabled_plugins["skywalking"] then %}
lua_shared_dict tracing_buffer {* http.lua_shared_dict.tracing_buffer *}; # plugin: skywalking
{% end %}
{% if enabled_plugins["api-breaker"] then %}
lua_shared_dict plugin-api-breaker {* http.lua_shared_dict["plugin-api-breaker"] *};
{% end %}
{% if enabled_plugins["openid-connect"] or enabled_plugins["authz-keycloak"] then %}
# for openid-connect and authz-keycloak plugin
lua_shared_dict discovery {* http.lua_shared_dict["discovery"] *}; # cache for discovery metadata documents
{% end %}
{% if enabled_plugins["openid-connect"] then %}
# for openid-connect plugin
lua_shared_dict jwks {* http.lua_shared_dict["jwks"] *}; # cache for JWKs
lua_shared_dict introspection {* http.lua_shared_dict["introspection"] *}; # cache for JWT verification results
{% end %}
{% if enabled_plugins["cas-auth"] then %}
lua_shared_dict cas_sessions {* http.lua_shared_dict["cas-auth"] *};
{% end %}
{% if enabled_plugins["authz-keycloak"] then %}
# for authz-keycloak
lua_shared_dict access-tokens {* http.lua_shared_dict["access-tokens"] *}; # cache for service account access tokens
{% end %}
{% if enabled_plugins["ocsp-stapling"] then %}
lua_shared_dict ocsp-stapling {* http.lua_shared_dict["ocsp-stapling"] *}; # cache for ocsp-stapling
{% end %}
{% if enabled_plugins["ext-plugin-pre-req"] or enabled_plugins["ext-plugin-post-req"] then %}
lua_shared_dict ext-plugin {* http.lua_shared_dict["ext-plugin"] *}; # cache for ext-plugin
{% end %}
{% if enabled_plugins["mcp-bridge"] then %}
lua_shared_dict mcp-session {* http.lua_shared_dict["mcp-session"] *}; # cache for mcp-session
{% end %}
{% if config_center == "xds" then %}
lua_shared_dict xds-config 10m;
lua_shared_dict xds-config-version 1m;
{% end %}
# for custom shared dict
{% if http.custom_lua_shared_dict then %}
{% for cache_key, cache_size in pairs(http.custom_lua_shared_dict) do %}
lua_shared_dict {*cache_key*} {*cache_size*};
{% end %}
{% end %}
{% if enabled_plugins["error-log-logger"] then %}
lua_capture_error_log 10m;
{% end %}
lua_ssl_verify_depth 5;
ssl_session_timeout 86400;
{% if http.underscores_in_headers then %}
underscores_in_headers {* http.underscores_in_headers *};
{%end%}
lua_socket_log_errors off;
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
lua_http10_buffering off;
lua_regex_match_limit 100000;
lua_regex_cache_max_entries 8192;
{% if http.enable_access_log == false then %}
access_log off;
{% else %}
log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}';
uninitialized_variable_warn off;
{% if http.access_log_buffer then %}
access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3;
{% else %}
access_log {* http.access_log *} main buffer=16384 flush=3;
{% end %}
{% end %}
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
client_header_timeout {* http.client_header_timeout *};
client_body_timeout {* http.client_body_timeout *};
send_timeout {* http.send_timeout *};
variables_hash_max_size {* http.variables_hash_max_size *};
server_tokens off;
include mime.types;
charset {* http.charset *};
{% if http.real_ip_header then %}
real_ip_header {* http.real_ip_header *};
{% end %}
{% if http.real_ip_recursive then %}
real_ip_recursive {* http.real_ip_recursive *};
{% end %}
{% if http.real_ip_from then %}
{% for _, real_ip in ipairs(http.real_ip_from) do %}
set_real_ip_from {*real_ip*};
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# http configuration snippet starts
{% if http_configuration_snippet then %}
{* http_configuration_snippet *}
{% end %}
# http configuration snippet ends
upstream apisix_backend {
server 0.0.0.1;
{% if use_apisix_base then %}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
# we put the static configuration above so that we can override it in the Lua code
balancer_by_lua_block {
apisix.http_balancer_phase()
}
{% else %}
balancer_by_lua_block {
apisix.http_balancer_phase()
}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
{% end %}
}
{% if enabled_plugins["dubbo-proxy"] then %}
upstream apisix_dubbo_backend {
server 0.0.0.1;
balancer_by_lua_block {
apisix.http_balancer_phase()
}
# dynamic keepalive doesn't work with dubbo as the connection here
# is managed by ngx_multi_upstream_module
multi {* dubbo_upstream_multiplex_count *};
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
}
{% end %}
{% if use_apisix_base then %}
apisix_delay_client_max_body_check on;
apisix_mirror_on_demand on;
{% end %}
{% if wasm then %}
wasm_vm wasmtime;
{% end %}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.http_init(args)
-- set apisix_lua_home into constants module
-- it may be used by plugins to determine the work path of apisix
local constants = require("apisix.constants")
constants.apisix_lua_home = "{*apisix_lua_home*}"
}
init_worker_by_lua_block {
apisix.http_init_worker()
}
exit_worker_by_lua_block {
apisix.http_exit_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/worker_events.sock;
access_log off;
location / {
content_by_lua_block {
require("resty.events.compat").run()
}
}
}
{% end %}
{% if enable_control then %}
server {
listen {* control_server_addr *};
access_log off;
location / {
content_by_lua_block {
apisix.http_control()
}
}
}
{% end %}
{% if status then %}
server {
listen {* status_server_addr *} enable_process=privileged_agent;
access_log off;
location /status {
content_by_lua_block {
apisix.status()
}
}
location /status/ready {
content_by_lua_block {
apisix.status_ready()
}
}
}
{% end %}
{% if enabled_plugins["prometheus"] and prometheus_server_addr then %}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics()
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
{% if enable_admin then %}
server {
{%if https_admin then%}
listen {* admin_server_addr *} ssl;
ssl_certificate {* admin_api_mtls.admin_ssl_cert *};
ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *};
{%if admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= "" then%}
ssl_verify_client on;
ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *};
{% end %}
ssl_session_cache shared:SSL:20m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% else %}
listen {* admin_server_addr *};
{%end%}
log_not_found off;
# admin configuration snippet starts
{% if http_admin_configuration_snippet then %}
{* http_admin_configuration_snippet *}
{% end %}
# admin configuration snippet ends
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%else%}
allow all;
{%end%}
location /apisix/admin {
content_by_lua_block {
apisix.http_admin()
}
}
{% if enable_admin_ui then %}
location = /ui {
return 301 /ui/;
}
location ^~ /ui/ {
rewrite ^/ui/(.*)$ /$1 break;
root {* apisix_lua_home *}/ui;
try_files $uri /index.html =404;
gzip on;
gzip_types text/css application/javascript application/json;
expires 7200s;
add_header Cache-Control "private,max-age=7200";
}
{% end %}
}
{% end %}
{% if deployment_role ~= "control_plane" then %}
{% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off;
{% else %}
lua_shared_dict {* cache.name *} {* cache.memory_size *};
{% end %}
{% end %}
map $upstream_cache_zone $upstream_cache_zone_info {
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
{* cache.name *} {* cache.disk_path *},{* cache.cache_levels *};
{% end %}
{% end %}
}
{% end %}
server {
{% if enable_http2 then %}
http2 on;
{% end %}
{% if enable_http3_in_server_context then %}
http3 on;
{% end %}
{% for _, item in ipairs(node_listen) do %}
listen {* item.ip *}:{* item.port *} default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if ssl.enable then %}
{% for _, item in ipairs(ssl.listen) do %}
{% if item.enable_http3 then %}
listen {* item.ip *}:{* item.port *} quic default_server {% if enable_reuseport then %} reuseport {% end %};
listen {* item.ip *}:{* item.port *} ssl default_server;
{% else %}
listen {* item.ip *}:{* item.port *} ssl default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% end %}
{% end %}
{% if proxy_protocol and proxy_protocol.listen_http_port then %}
listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol;
{% end %}
{% if proxy_protocol and proxy_protocol.listen_https_port then %}
listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol;
{% end %}
server_name _;
{% if ssl.enable then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 10m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# opentelemetry_set_ngx_var starts
{% if opentelemetry_set_ngx_var then %}
set $opentelemetry_context_traceparent '';
set $opentelemetry_trace_id '';
set $opentelemetry_span_id '';
{% end %}
# opentelemetry_set_ngx_var ends
# zipkin_set_ngx_var starts
{% if zipkin_set_ngx_var then %}
set $zipkin_context_traceparent '';
set $zipkin_trace_id '';
set $zipkin_span_id '';
{% end %}
# zipkin_set_ngx_var ends
# http server configuration snippet starts
{% if http_server_configuration_snippet then %}
{* http_server_configuration_snippet *}
{% end %}
# http server configuration snippet ends
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
access_log off;
stub_status;
}
{% if ssl.enable then %}
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if http.proxy_ssl_server_name then %}
proxy_ssl_name $upstream_host;
proxy_ssl_server_name on;
{% end %}
location / {
set $upstream_mirror_host '';
set $upstream_mirror_uri '';
set $upstream_upgrade '';
set $upstream_connection '';
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
set $ctx_ref '';
{% if wasm then %}
set $wasm_process_req_body '';
set $wasm_process_resp_body '';
{% end %}
# http server location configuration snippet starts
{% if http_server_location_configuration_snippet then %}
{* http_server_location_configuration_snippet *}
{% end %}
# http server location configuration snippet ends
{% if enabled_plugins["dubbo-proxy"] then %}
set $dubbo_service_name '';
set $dubbo_service_version '';
set $dubbo_method '';
{% end %}
access_by_lua_block {
apisix.http_access_phase()
}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_set_header Upgrade $upstream_upgrade;
proxy_set_header Connection $upstream_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_header Date;
### the following x-forwarded-* headers are sent to the upstream server
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
{% if enabled_plugins["proxy-cache"] then %}
### the following configuration caches response content from the upstream server
set $upstream_cache_zone off;
set $upstream_cache_key '';
set $upstream_cache_bypass '';
set $upstream_no_cache '';
proxy_cache $upstream_cache_zone;
proxy_cache_valid any {% if proxy_cache.cache_ttl then %} {* proxy_cache.cache_ttl *} {% else %} 10s {% end %};
proxy_cache_min_uses 1;
proxy_cache_methods GET HEAD POST;
proxy_cache_lock_timeout 5s;
proxy_cache_use_stale off;
proxy_cache_key $upstream_cache_key;
proxy_no_cache $upstream_no_cache;
proxy_cache_bypass $upstream_cache_bypass;
{% end %}
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
location @grpc_pass {
access_by_lua_block {
apisix.grpc_access_phase()
}
{% if use_apisix_base then %}
# For servers which obey the standard, when `:authority` is missing,
# `host` will be used instead. When used with apisix-runtime, we can do
# better by setting `:authority` directly
grpc_set_header ":authority" $upstream_host;
{% else %}
grpc_set_header "Host" $upstream_host;
{% end %}
grpc_set_header Content-Type application/grpc;
grpc_set_header TE trailers;
grpc_socket_keepalive on;
grpc_pass $upstream_scheme://apisix_backend;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror_grpc;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% if enabled_plugins["dubbo-proxy"] then %}
location @dubbo_pass {
access_by_lua_block {
apisix.dubbo_access_phase()
}
dubbo_pass_all_headers on;
dubbo_pass_body on;
dubbo_pass $dubbo_service_name $dubbo_service_version $dubbo_method apisix_dubbo_backend;
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
proxy_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
proxy_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
proxy_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_pass $upstream_mirror_uri;
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror_grpc {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
grpc_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
grpc_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
grpc_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
grpc_pass $upstream_mirror_host;
}
{% end %}
}
{% end %}
# http end configuration snippet starts
{% if http_end_configuration_snippet then %}
{* http_end_configuration_snippet *}
{% end %}
# http end configuration snippet ends
}
{% end %}
]=]
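
The {% ... %} control blocks and {* ... *} substitutions in this template are lua-resty-template syntax, which the APISIX CLI uses to render the final nginx.conf. A minimal, self-contained sketch of the mechanism on a two-directive excerpt:

local template = require("resty.template")
local tpl = [[
{% if user and user ~= '' then %}user {* user *};
{% end %}worker_processes {* worker_processes *};
]]
print(template.compile(tpl)({ user = "www-data", worker_processes = "auto" }))
-- roughly prints:
--   user www-data;
--   worker_processes auto;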

File diff suppressed because it is too large.


@@ -0,0 +1,450 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local jsonschema = require("jsonschema")
local pairs = pairs
local pcall = pcall
local require = require
local _M = {}
local etcd_schema = {
type = "object",
properties = {
resync_delay = {
type = "integer",
},
user = {
type = "string",
},
password = {
type = "string",
},
tls = {
type = "object",
properties = {
cert = {
type = "string",
},
key = {
type = "string",
},
},
},
prefix = {
type = "string",
},
host = {
type = "array",
items = {
type = "string",
pattern = [[^https?://]]
},
minItems = 1,
},
timeout = {
type = "integer",
default = 30,
minimum = 1,
description = "etcd connection timeout in seconds",
},
},
required = {"prefix", "host"}
}
local config_schema = {
type = "object",
properties = {
apisix = {
properties = {
lua_module_hook = {
pattern = "^[a-zA-Z._-]+$",
},
proxy_protocol = {
type = "object",
properties = {
listen_http_port = {
type = "integer",
},
listen_https_port = {
type = "integer",
},
enable_tcp_pp = {
type = "boolean",
},
enable_tcp_pp_to_upstream = {
type = "boolean",
},
}
},
proxy_cache = {
type = "object",
properties = {
zones = {
type = "array",
minItems = 1,
items = {
type = "object",
properties = {
name = {
type = "string",
},
memory_size = {
type = "string",
},
disk_size = {
type = "string",
},
disk_path = {
type = "string",
},
cache_levels = {
type = "string",
},
},
oneOf = {
{
required = {"name", "memory_size"},
maxProperties = 2,
},
{
required = {"name", "memory_size", "disk_size",
"disk_path", "cache_levels"},
}
},
},
uniqueItems = true,
}
}
},
proxy_mode = {
type = "string",
enum = {"http", "stream", "http&stream"},
},
stream_proxy = {
type = "object",
properties = {
tcp = {
type = "array",
minItems = 1,
items = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
{
type = "object",
properties = {
addr = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
}
},
tls = {
type = "boolean",
}
},
required = {"addr"}
},
},
},
uniqueItems = true,
},
udp = {
type = "array",
minItems = 1,
items = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
},
},
uniqueItems = true,
},
}
},
dns_resolver = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
dns_resolver_valid = {
type = "integer",
},
enable_http2 = {
type = "boolean",
default = true
},
ssl = {
type = "object",
properties = {
ssl_trusted_certificate = {
type = "string",
default = "system"
},
listen = {
type = "array",
items = {
type = "object",
properties = {
ip = {
type = "string",
},
port = {
type = "integer",
minimum = 1,
maximum = 65535
},
enable_http3 = {
type = "boolean",
},
}
}
},
}
},
data_encryption = {
type = "object",
properties = {
keyring = {
anyOf = {
{
type = "array",
minItems = 1,
items = {
type = "string",
minLength = 16,
maxLength = 16
}
},
{
type = "string",
minLength = 16,
maxLength = 16
}
}
},
}
},
}
},
nginx_config = {
type = "object",
properties = {
envs = {
type = "array",
minItems = 1,
items = {
type = "string",
}
}
},
},
http = {
type = "object",
properties = {
custom_lua_shared_dict = {
type = "object",
}
}
},
etcd = etcd_schema,
plugins = {
type = "array",
default = {},
minItems = 0,
items = {
type = "string"
}
},
stream_plugins = {
type = "array",
default = {},
minItems = 0,
items = {
type = "string"
}
},
wasm = {
type = "object",
properties = {
plugins = {
type = "array",
minItems = 1,
items = {
type = "object",
properties = {
name = {
type = "string"
},
file = {
type = "string"
},
priority = {
type = "integer"
},
http_request_phase = {
enum = {"access", "rewrite"},
default = "access",
},
},
required = {"name", "file", "priority"}
}
}
}
},
deployment = {
type = "object",
properties = {
role = {
enum = {"traditional", "control_plane", "data_plane", "standalone"},
default = "traditional"
}
},
},
},
required = {"apisix", "deployment"},
}
local admin_schema = {
type = "object",
properties = {
admin_key = {
type = "array",
items = {
type = "object",
properties = {
name = {type = "string"},
key = {type = "string"},
role = {type = "string"},
}
}
},
admin_listen = {
properties = {
listen = { type = "string" },
port = { type = "integer" },
},
default = {
listen = "0.0.0.0",
port = 9180,
}
},
https_admin = {
type = "boolean",
},
admin_key_required = {
type = "boolean",
},
}
}
local deployment_schema = {
traditional = {
properties = {
etcd = etcd_schema,
admin = admin_schema,
role_traditional = {
properties = {
config_provider = {
enum = {"etcd", "yaml"}
},
},
required = {"config_provider"}
}
},
required = {"etcd"}
},
control_plane = {
properties = {
etcd = etcd_schema,
admin = admin_schema,
role_control_plane = {
properties = {
config_provider = {
enum = {"etcd"}
},
},
required = {"config_provider"}
},
},
required = {"etcd", "role_control_plane"}
},
data_plane = {
properties = {
etcd = etcd_schema,
role_data_plane = {
properties = {
config_provider = {
enum = {"etcd", "yaml", "json", "xds"}
},
},
required = {"config_provider"}
},
},
required = {"role_data_plane"}
}
}
function _M.validate(yaml_conf)
local validator = jsonschema.generate_validator(config_schema)
local ok, err = validator(yaml_conf)
if not ok then
return false, "failed to validate config: " .. err
end
if yaml_conf.discovery then
for kind, conf in pairs(yaml_conf.discovery) do
local ok, schema = pcall(require, "apisix.discovery." .. kind .. ".schema")
if ok then
local validator = jsonschema.generate_validator(schema)
local ok, err = validator(conf)
if not ok then
return false, "invalid discovery " .. kind .. " configuration: " .. err
end
end
end
end
local role = yaml_conf.deployment.role
local validator = jsonschema.generate_validator(deployment_schema[role])
local ok, err = validator(yaml_conf.deployment)
if not ok then
return false, "invalid deployment " .. role .. " configuration: " .. err
end
return true
end
return _M
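
A minimal sketch of running a config table through this validator (apisix.cli.schema is the require path used elsewhere in this commit):

local schema = require("apisix.cli.schema")
local ok, err = schema.validate({
    apisix = {},
    deployment = {
        role = "traditional",
        role_traditional = { config_provider = "etcd" },
        etcd = { prefix = "/apisix", host = { "http://127.0.0.1:2379" } },
    },
})
assert(ok, err) -- fails instead if, e.g., an etcd host lacks its http(s):// scheme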


@@ -0,0 +1,189 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local pcall = pcall
local open = io.open
local popen = io.popen
local close = io.close
local exit = os.exit
local stderr = io.stderr
local str_format = string.format
local tonumber = tonumber
local io = io
local ipairs = ipairs
local assert = assert
local _M = {}
-- Note: the `execute_cmd` return value has a trailing line break;
-- it is recommended to use the `trim` function on the return value.
local function execute_cmd(cmd)
local t, err = popen(cmd)
if not t then
return nil, "failed to execute command: "
.. cmd .. ", error info: " .. err
end
local data, err = t:read("*all")
t:close()
if not data then
return nil, "failed to read execution result of: "
.. cmd .. ", error info: " .. err
end
return data
end
_M.execute_cmd = execute_cmd
-- For commands whose stdout would always be empty,
-- forward stderr to stdout to capture the error message
function _M.execute_cmd_with_error(cmd)
return execute_cmd(cmd .. " 2>&1")
end
function _M.trim(s)
return (s:gsub("^%s*(.-)%s*$", "%1"))
end
function _M.split(self, sep)
local sep, fields = sep or ":", {}
local pattern = str_format("([^%s]+)", sep)
self:gsub(pattern, function(c) fields[#fields + 1] = c end)
return fields
end
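-- Usage sketch for the helpers above (error handling omitted):
--   local bits = _M.trim(_M.execute_cmd("getconf LONG_BIT")) -- e.g. "64"
--   local fields = _M.split("a:b:c", ":")                    -- {"a", "b", "c"}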
function _M.read_file(file_path)
local file, err = open(file_path, "rb")
if not file then
return false, "failed to open file: " .. file_path .. ", error info:" .. err
end
local data, err = file:read("*all")
file:close()
if not data then
return false, "failed to read file: " .. file_path .. ", error info:" .. err
end
return data
end
function _M.die(...)
stderr:write(...)
exit(1)
end
function _M.is_32bit_arch()
local ok, ffi = pcall(require, "ffi")
if ok then
-- LuaJIT
return ffi.abi("32bit")
end
local ret = _M.execute_cmd("getconf LONG_BIT")
local bits = tonumber(ret)
return bits <= 32
end
function _M.write_file(file_path, data)
local file, err = open(file_path, "w+")
if not file then
return false, "failed to open file: "
.. file_path
.. ", error info:"
.. err
end
local ok, err = file:write(data)
file:close()
if not ok then
return false, "failed to write file: "
.. file_path
.. ", error info:"
.. err
end
return true
end
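-- Usage sketch for the file helpers (the path is illustrative):
--   local ok = _M.write_file("/tmp/demo.txt", "hello")
--   local data = ok and _M.read_file("/tmp/demo.txt") -- "hello"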
function _M.file_exists(file_path)
local f = open(file_path, "r")
return f ~= nil and close(f)
end
do
local trusted_certs_paths = {
"/etc/ssl/certs/ca-certificates.crt", -- Debian/Ubuntu/Gentoo
"/etc/pki/tls/certs/ca-bundle.crt", -- Fedora/RHEL 6
"/etc/ssl/ca-bundle.pem", -- OpenSUSE
"/etc/pki/tls/cacert.pem", -- OpenELEC
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", -- CentOS/RHEL 7
"/etc/ssl/cert.pem", -- OpenBSD, Alpine
}
-- Check if a file exists using Lua's built-in `io.open`
local function file_exists(path)
local file = io.open(path, "r")
if file then
file:close()
return true
else
return false
end
end
function _M.get_system_trusted_certs_filepath()
for _, path in ipairs(trusted_certs_paths) do
if file_exists(path) then
return path
end
end
return nil,
"Could not find trusted certs file in " ..
"any of the `system`-predefined locations. " ..
"Please install a certs file there or set " ..
"`lua_ssl_trusted_certificate` to a " ..
"specific file path instead of `system`"
end
end
function _M.gen_trusted_certs_combined_file(combined_filepath, paths)
local combined_file = assert(io.open(combined_filepath, "w"))
for _, path in ipairs(paths) do
local cert_file = assert(io.open(path, "r"))
combined_file:write(cert_file:read("*a"))
combined_file:write("\n")
cert_file:close()
end
combined_file:close()
end
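-- Usage sketch: locate the system CA bundle and build a combined file from it
-- (the output path is illustrative):
--   local path = _M.get_system_trusted_certs_filepath()
--   if path then
--       _M.gen_trusted_certs_combined_file("/tmp/combined-ca.pem", {path})
--   end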
return _M


@@ -0,0 +1,46 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
RPC_ERROR = 0,
RPC_PREPARE_CONF = 1,
RPC_HTTP_REQ_CALL = 2,
RPC_EXTRA_INFO = 3,
RPC_HTTP_RESP_CALL = 4,
HTTP_ETCD_DIRECTORY = {
["/upstreams"] = true,
["/plugins"] = true,
["/ssls"] = true,
["/stream_routes"] = true,
["/plugin_metadata"] = true,
["/routes"] = true,
["/services"] = true,
["/consumers"] = true,
["/global_rules"] = true,
["/protos"] = true,
["/plugin_configs"] = true,
["/consumer_groups"] = true,
["/secrets"] = true,
},
STREAM_ETCD_DIRECTORY = {
["/upstreams"] = true,
["/services"] = true,
["/plugins"] = true,
["/ssls"] = true,
["/stream_routes"] = true,
["/plugin_metadata"] = true,
},
}
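-- Usage sketch (assuming this file is loaded as apisix.constants):
--   local constants = require("apisix.constants")
--   if constants.HTTP_ETCD_DIRECTORY["/routes"] then
--       -- the /routes prefix is synced for the HTTP subsystem
--   end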


@@ -0,0 +1,334 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_local = require("apisix.core.config_local")
local secret = require("apisix.secret")
local plugin = require("apisix.plugin")
local plugin_checker = require("apisix.plugin").plugin_checker
local check_schema = require("apisix.core.schema").check
local error = error
local ipairs = ipairs
local pairs = pairs
local type = type
local string_sub = string.sub
local consumers
local _M = {
version = 0.3,
}
local lrucache = core.lrucache.new({
ttl = 300, count = 512
})
-- Set the "consumers_count_for_lrucache" variable according to the number
-- of consumers in the current environment, applying an appropriate
-- adjustment coefficient as headroom.
local consumers_count_for_lrucache = 4096
local function remove_etcd_prefix(key)
local prefix = ""
local local_conf = config_local.local_conf()
local role = core.table.try_read_attr(local_conf, "deployment", "role")
local provider = core.table.try_read_attr(local_conf, "deployment", "role_" ..
role, "config_provider")
if provider == "etcd" and local_conf.etcd and local_conf.etcd.prefix then
prefix = local_conf.etcd.prefix
end
return string_sub(key, #prefix + 1)
end
-- /{etcd.prefix}/consumers/{consumer_name}/credentials/{credential_id} --> {consumer_name}
local function get_consumer_name_from_credential_etcd_key(key)
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[3]
end
local function is_credential_etcd_key(key)
if not key then
return false
end
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[2] == "consumers" and uri_segs[4] == "credentials"
end
local function get_credential_id_from_etcd_key(key)
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[5]
end
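-- Key-shape sketch: given key = "/apisix/consumers/jack/credentials/d0f8"
-- and etcd prefix "/apisix" (both values illustrative):
--   is_credential_etcd_key(key)                      --> true
--   get_consumer_name_from_credential_etcd_key(key)  --> "jack"
--   get_credential_id_from_etcd_key(key)             --> "d0f8"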
local function filter_consumers_list(data_list)
if #data_list == 0 then
return data_list
end
local list = {}
for _, item in ipairs(data_list) do
if not (type(item) == "table" and is_credential_etcd_key(item.key)) then
core.table.insert(list, item)
end
end
return list
end
local plugin_consumer
do
local consumers_id_lrucache = core.lrucache.new({
count = consumers_count_for_lrucache
})
local function construct_consumer_data(val, plugin_config)
-- if val is a Consumer, clone it into the local consumer;
-- if val is a Credential, look up the Consumer by consumer_name and then
-- clone that into the local consumer.
local consumer
if is_credential_etcd_key(val.key) then
local consumer_name = get_consumer_name_from_credential_etcd_key(val.key)
local the_consumer = consumers:get(consumer_name)
if the_consumer and the_consumer.value then
consumer = core.table.clone(the_consumer.value)
consumer.modifiedIndex = the_consumer.modifiedIndex
consumer.credential_id = get_credential_id_from_etcd_key(val.key)
else
-- Normally we wouldn't get here:
-- every credential should belong to a consumer.
core.log.error("failed to get the consumer for the credential,",
" a wild credential has appeared!",
" credential key: ", val.key, ", consumer name: ", consumer_name)
return nil, "failed to get the consumer for the credential"
end
else
consumer = core.table.clone(val.value)
consumer.modifiedIndex = val.modifiedIndex
end
-- if the consumer has labels, copy the "custom_id" label into the custom_id field.
-- the custom_id is later sent in a request header to the upstream.
if consumer.labels then
consumer.custom_id = consumer.labels["custom_id"]
end
-- Note: the id here is the key of the consumer data, which
-- is the 'username' field in the Admin API
consumer.consumer_name = consumer.id
consumer.auth_conf = plugin_config
return consumer
end
function plugin_consumer()
local plugins = {}
if consumers.values == nil then
return plugins
end
-- consumers.values is the list fetched from etcd by the prefix key
-- {etcd_prefix}/consumers, so it contains both consumers and credentials.
-- The val in the for-loop may therefore be a Consumer or a Credential.
for _, val in ipairs(consumers.values) do
if type(val) ~= "table" then
goto CONTINUE
end
for name, config in pairs(val.value.plugins or {}) do
local plugin_obj = plugin.get(name)
if plugin_obj and plugin_obj.type == "auth" then
if not plugins[name] then
plugins[name] = {
nodes = {},
len = 0,
conf_version = consumers.conf_version
}
end
local consumer = consumers_id_lrucache(val.value.id .. name,
val.modifiedIndex, construct_consumer_data, val, config)
if consumer == nil then
goto CONTINUE
end
plugins[name].len = plugins[name].len + 1
core.table.insert(plugins[name].nodes, plugins[name].len,
consumer)
core.log.info("consumer:", core.json.delay_encode(consumer))
end
end
::CONTINUE::
end
return plugins
end
end
_M.filter_consumers_list = filter_consumers_list
function _M.get_consumer_key_from_credential_key(key)
local uri_segs = core.utils.split_uri(key)
return "/consumers/" .. uri_segs[3]
end
function _M.plugin(plugin_name)
local plugin_conf = core.lrucache.global("/consumers",
consumers.conf_version, plugin_consumer)
return plugin_conf[plugin_name]
end
function _M.consumers_conf(plugin_name)
return _M.plugin(plugin_name)
end
-- attach the chosen consumer to the ctx; used in auth plugins
function _M.attach_consumer(ctx, consumer, conf)
ctx.consumer = consumer
ctx.consumer_name = consumer.consumer_name
ctx.consumer_group_id = consumer.group_id
ctx.consumer_ver = conf.conf_version
core.request.set_header(ctx, "X-Consumer-Username", consumer.username)
core.request.set_header(ctx, "X-Credential-Identifier", consumer.credential_id)
core.request.set_header(ctx, "X-Consumer-Custom-ID", consumer.custom_id)
end
function _M.consumers()
if not consumers then
return nil, nil
end
return filter_consumers_list(consumers.values), consumers.conf_version
end
local create_consume_cache
do
local consumer_lrucache = core.lrucache.new({
count = consumers_count_for_lrucache
})
local function fill_consumer_secret(consumer)
local new_consumer = core.table.clone(consumer)
new_consumer.auth_conf = secret.fetch_secrets(new_consumer.auth_conf, false)
return new_consumer
end
function create_consume_cache(consumers_conf, key_attr)
local consumer_names = {}
for _, consumer in ipairs(consumers_conf.nodes) do
core.log.info("consumer node: ", core.json.delay_encode(consumer))
local new_consumer = consumer_lrucache(consumer, nil,
fill_consumer_secret, consumer)
consumer_names[new_consumer.auth_conf[key_attr]] = new_consumer
end
return consumer_names
end
end
function _M.consumers_kv(plugin_name, consumer_conf, key_attr)
local consumers = lrucache("consumers_key#" .. plugin_name, consumer_conf.conf_version,
create_consume_cache, consumer_conf, key_attr)
return consumers
end
function _M.find_consumer(plugin_name, key, key_value)
local consumer
local consumer_conf
consumer_conf = _M.plugin(plugin_name)
if not consumer_conf then
return nil, nil, "Missing related consumer"
end
local consumers = _M.consumers_kv(plugin_name, consumer_conf, key)
consumer = consumers[key_value]
return consumer, consumer_conf
end
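-- Usage sketch inside an auth plugin (plugin name and key attribute are
-- illustrative; callers reach this module via require("apisix.consumer")):
--   local consumer, consumer_conf = _M.find_consumer("key-auth", "key", apikey)
--   if consumer then
--       _M.attach_consumer(ctx, consumer, consumer_conf)
--   end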
local function check_consumer(consumer, key)
local data_valid
local err
if is_credential_etcd_key(key) then
data_valid, err = check_schema(core.schema.credential, consumer)
else
data_valid, err = check_schema(core.schema.consumer, consumer)
end
if not data_valid then
return data_valid, err
end
return plugin_checker(consumer, core.schema.TYPE_CONSUMER)
end
function _M.init_worker()
local err
local cfg = {
automatic = true,
checker = check_consumer,
}
consumers, err = core.config.new("/consumers", cfg)
if not consumers then
error("failed to create etcd instance for fetching consumers: " .. err)
return
end
end
local function get_anonymous_consumer_from_local_cache(name)
local anon_consumer_raw = consumers:get(name)
if not anon_consumer_raw or not anon_consumer_raw.value or
not anon_consumer_raw.value.id or not anon_consumer_raw.modifiedIndex then
return nil, nil, "failed to get anonymous consumer " .. name
end
-- make structure of anon_consumer similar to that of consumer_mod.consumers_kv's response
local anon_consumer = anon_consumer_raw.value
anon_consumer.consumer_name = anon_consumer_raw.value.id
anon_consumer.modifiedIndex = anon_consumer_raw.modifiedIndex
local anon_consumer_conf = {
conf_version = anon_consumer_raw.modifiedIndex
}
return anon_consumer, anon_consumer_conf
end
function _M.get_anonymous_consumer(name)
local anon_consumer, anon_consumer_conf, err
anon_consumer, anon_consumer_conf, err = get_anonymous_consumer_from_local_cache(name)
return anon_consumer, anon_consumer_conf, err
end
return _M


@@ -0,0 +1,55 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugin_checker = require("apisix.plugin").plugin_checker
local error = error
local consumer_groups
local _M = {
}
function _M.init_worker()
local err
consumer_groups, err = core.config.new("/consumer_groups", {
automatic = true,
item_schema = core.schema.consumer_group,
checker = plugin_checker,
})
if not consumer_groups then
error("failed to sync /consumer_groups: " .. err)
end
end
function _M.consumer_groups()
if not consumer_groups then
return nil, nil
end
return consumer_groups.values, consumer_groups.conf_version
end
function _M.get(id)
return consumer_groups:get(id)
end
return _M


@@ -0,0 +1,212 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local radixtree = require("resty.radixtree")
local builtin_v1_routes = require("apisix.control.v1")
local plugin_mod = require("apisix.plugin")
local core = require("apisix.core")
local str_sub = string.sub
local ipairs = ipairs
local pairs = pairs
local type = type
local ngx = ngx
local get_method = ngx.req.get_method
local events = require("apisix.events")
local _M = {}
local function format_dismod_uri(mod_name, uri)
if core.string.has_prefix(uri, "/v1/") then
return uri
end
local tmp = {"/v1/discovery/", mod_name}
if not core.string.has_prefix(uri, "/") then
core.table.insert(tmp, "/")
end
core.table.insert(tmp, uri)
return core.table.concat(tmp, "")
end
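-- e.g. format_dismod_uri("eureka", "/dump") --> "/v1/discovery/eureka/dump"
--      format_dismod_uri("eureka", "/v1/x") --> "/v1/x" (already prefixed)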
-- we do not hardcode the discovery module's control api uri
local function format_dismod_control_api_uris(mod_name, api_route)
if not api_route or #api_route == 0 then
return api_route
end
local clone_route = core.table.clone(api_route)
for _, v in ipairs(clone_route) do
local uris = v.uris
local target_uris = core.table.new(#uris, 0)
for _, uri in ipairs(uris) do
local target_uri = format_dismod_uri(mod_name, uri)
core.table.insert(target_uris, target_uri)
end
v.uris = target_uris
end
return clone_route
end
local fetch_control_api_router
do
local function register_api_routes(routes, api_routes)
for _, route in ipairs(api_routes) do
core.table.insert(routes, {
methods = route.methods,
-- note that it is 'uris' for control API, which is an array of strings
paths = route.uris,
handler = function (api_ctx)
local code, body = route.handler(api_ctx)
if code or body then
if type(body) == "table" and ngx.header["Content-Type"] == nil then
core.response.set_header("Content-Type", "application/json")
end
core.response.exit(code, body)
end
end
})
end
end
local routes = {}
local v1_routes = {}
local function empty_func() end
function fetch_control_api_router()
core.table.clear(routes)
for _, plugin in ipairs(plugin_mod.plugins) do
local api_fun = plugin.control_api
if api_fun then
local api_route = api_fun()
register_api_routes(routes, api_route)
end
end
local discovery_type = require("apisix.core.config_local").local_conf().discovery
if discovery_type then
local discovery = require("apisix.discovery.init").discovery
local dump_apis = {}
for key, _ in pairs(discovery_type) do
local dis_mod = discovery[key]
-- if the discovery module has a control_api method, register its routes
local api_fun = dis_mod.control_api
if api_fun then
local api_route = api_fun()
local format_route = format_dismod_control_api_uris(key, api_route)
register_api_routes(routes, format_route)
end
local dump_data = dis_mod.dump_data
if dump_data then
local target_uri = format_dismod_uri(key, "/dump")
local item = {
methods = {"GET"},
uris = {target_uri},
handler = function()
return 200, dump_data()
end
}
core.table.insert(dump_apis, item)
end
end
if #dump_apis > 0 then
core.log.notice("dump_apis: ", core.json.encode(dump_apis, true))
register_api_routes(routes, dump_apis)
end
end
core.table.clear(v1_routes)
register_api_routes(v1_routes, builtin_v1_routes)
local v1_router, err = router.new(v1_routes)
if not v1_router then
return nil, err
end
core.table.insert(routes, {
paths = {"/v1/*"},
filter_fun = function(vars, opts, ...)
local uri = str_sub(vars.uri, #"/v1" + 1)
return v1_router:dispatch(uri, opts, ...)
end,
handler = empty_func,
})
local with_parameter = false
local conf = core.config.local_conf()
if conf.apisix.enable_control and conf.apisix.control then
if conf.apisix.control.router == "radixtree_uri_with_parameter" then
with_parameter = true
end
end
if with_parameter then
return radixtree.new(routes)
else
return router.new(routes)
end
end
end -- do
do
local match_opts = {}
local cached_version
local router
function _M.match(uri)
if cached_version ~= plugin_mod.load_times then
local err
router, err = fetch_control_api_router()
if router == nil then
core.log.error("failed to fetch valid api router: ", err)
return false
end
cached_version = plugin_mod.load_times
end
core.table.clear(match_opts)
match_opts.method = get_method()
return router:dispatch(uri, match_opts)
end
end -- do
local function reload_plugins()
core.log.info("start to hot reload plugins")
plugin_mod.load()
end
function _M.init_worker()
-- register reload plugin handler
events:register(reload_plugins, builtin_v1_routes.reload_event, "PUT")
end
return _M


@@ -0,0 +1,506 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local upstream_mod = require("apisix.upstream")
local get_upstreams = upstream_mod.upstreams
local collectgarbage = collectgarbage
local ipairs = ipairs
local pcall = pcall
local str_format = string.format
local ngx = ngx
local ngx_var = ngx.var
local events = require("apisix.events")
local _M = {}
_M.RELOAD_EVENT = 'control-api-plugin-reload'
function _M.schema()
local http_plugins, stream_plugins = plugin.get_all({
version = true,
priority = true,
schema = true,
metadata_schema = true,
consumer_schema = true,
type = true,
scope = true,
})
local schema = {
main = {
consumer = core.schema.consumer,
consumer_group = core.schema.consumer_group,
global_rule = core.schema.global_rule,
plugin_config = core.schema.plugin_config,
plugins = core.schema.plugins,
proto = core.schema.proto,
route = core.schema.route,
service = core.schema.service,
ssl = core.schema.ssl,
stream_route = core.schema.stream_route,
upstream = core.schema.upstream,
upstream_hash_header_schema = core.schema.upstream_hash_header_schema,
upstream_hash_vars_schema = core.schema.upstream_hash_vars_schema,
},
plugins = http_plugins,
stream_plugins = stream_plugins,
}
return 200, schema
end
local healthcheck
local function extra_checker_info(value)
if not healthcheck then
healthcheck = require("resty.healthcheck")
end
local name = upstream_mod.get_healthchecker_name(value)
local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck")
if err then
core.log.error("healthcheck.get_target_list failed: ", err)
end
return {
name = value.key,
nodes = nodes,
}
end
local function get_checker_type(checks)
if checks.active and checks.active.type then
return checks.active.type
elseif checks.passive and checks.passive.type then
return checks.passive.type
end
end
local function iter_and_add_healthcheck_info(infos, values)
if not values then
return
end
for _, value in core.config_util.iterate_values(values) do
local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks)
if checks then
local info = extra_checker_info(value)
info.type = get_checker_type(checks)
core.table.insert(infos, info)
end
end
end
local HTML_TEMPLATE = [[
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>APISIX upstream check status</title>
</head>
<body>
<h1>APISIX upstream check status</h1>
<table style="background-color:white" cellspacing="0" cellpadding="3" border="1">
<tr bgcolor="#C0C0C0">
<th>Index</th>
<th>Upstream</th>
<th>Check type</th>
<th>Host</th>
<th>Status</th>
<th>Success counts</th>
<th>TCP Failures</th>
<th>HTTP Failures</th>
<th>TIMEOUT Failures</th>
</tr>
{% local i = 0 %}
{% for _, stat in ipairs(stats) do %}
{% for _, node in ipairs(stat.nodes) do %}
{% i = i + 1 %}
{% if node.status == "healthy" or node.status == "mostly_healthy" then %}
<tr>
{% else %}
<tr bgcolor="#FF0000">
{% end %}
<td>{* i *}</td>
<td>{* stat.name *}</td>
<td>{* stat.type *}</td>
<td>{* node.ip .. ":" .. node.port *}</td>
<td>{* node.status *}</td>
<td>{* node.counter.success *}</td>
<td>{* node.counter.tcp_failure *}</td>
<td>{* node.counter.http_failure *}</td>
<td>{* node.counter.timeout_failure *}</td>
</tr>
{% end %}
{% end %}
</table>
</body>
</html>
]]
local html_render
local function try_render_html(data)
if not html_render then
local template = require("resty.template")
html_render = template.compile(HTML_TEMPLATE)
end
local accept = ngx_var.http_accept
if accept and accept:find("text/html") then
local ok, out = pcall(html_render, data)
if not ok then
local err = str_format("HTML template rendering: %s", out)
core.log.error(err)
return nil, err
end
return out
end
end
local function _get_health_checkers()
local infos = {}
local routes = get_routes()
iter_and_add_healthcheck_info(infos, routes)
local services = get_services()
iter_and_add_healthcheck_info(infos, services)
local upstreams = get_upstreams()
iter_and_add_healthcheck_info(infos, upstreams)
return infos
end
function _M.get_health_checkers()
local infos = _get_health_checkers()
local out, err = try_render_html({stats=infos})
if out then
core.response.set_header("Content-Type", "text/html")
return 200, out
end
if err then
return 503, {error_msg = err}
end
return 200, infos
end
local function iter_and_find_healthcheck_info(values, src_type, src_id)
if not values then
return nil, str_format("%s[%s] not found", src_type, src_id)
end
for _, value in core.config_util.iterate_values(values) do
if value.value.id == src_id then
local checks = value.value.checks or
(value.value.upstream and value.value.upstream.checks)
if not checks then
return nil, str_format("no checker for %s[%s]", src_type, src_id)
end
local info = extra_checker_info(value)
info.type = get_checker_type(checks)
return info
end
end
return nil, str_format("%s[%s] not found", src_type, src_id)
end
function _M.get_health_checker()
local uri_segs = core.utils.split_uri(ngx_var.uri)
core.log.info("healthcheck uri: ", core.json.delay_encode(uri_segs))
local src_type, src_id = uri_segs[4], uri_segs[5]
if not src_id then
return 404, {error_msg = str_format("missing src id for src type %s", src_type)}
end
local values
if src_type == "routes" then
values = get_routes()
elseif src_type == "services" then
values = get_services()
elseif src_type == "upstreams" then
values = get_upstreams()
else
return 400, {error_msg = str_format("invalid src type %s", src_type)}
end
local info, err = iter_and_find_healthcheck_info(values, src_type, src_id)
if not info then
return 404, {error_msg = err}
end
local out, err = try_render_html({stats={info}})
if out then
core.response.set_header("Content-Type", "text/html")
return 200, out
end
if err then
return 503, {error_msg = err}
end
return 200, info
end
local function iter_add_get_routes_info(values, route_id)
local infos = {}
for _, route in core.config_util.iterate_values(values) do
local new_route = core.table.deepcopy(route)
if new_route.value.upstream and new_route.value.upstream.parent then
new_route.value.upstream.parent = nil
end
-- remove healthcheck info
new_route.checker = nil
new_route.checker_idx = nil
new_route.checker_upstream = nil
new_route.clean_handlers = nil
core.table.insert(infos, new_route)
-- check the route id
if route_id and route.value.id == route_id then
return new_route
end
end
if not route_id then
return infos
end
return nil
end
function _M.dump_all_routes_info()
local routes = get_routes()
local infos = iter_add_get_routes_info(routes, nil)
return 200, infos
end
function _M.dump_route_info()
local routes = get_routes()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local route_id = uri_segs[4]
local route = iter_add_get_routes_info(routes, route_id)
if not route then
return 404, {error_msg = str_format("route[%s] not found", route_id)}
end
return 200, route
end
local function iter_add_get_upstream_info(values, upstream_id)
if not values then
return nil
end
local infos = {}
for _, upstream in core.config_util.iterate_values(values) do
local new_upstream = core.table.deepcopy(upstream)
core.table.insert(infos, new_upstream)
if new_upstream.value and new_upstream.value.parent then
new_upstream.value.parent = nil
end
-- check the upstream id
if upstream_id and upstream.value.id == upstream_id then
return new_upstream
end
end
if not upstream_id then
return infos
end
return nil
end
function _M.dump_all_upstreams_info()
local upstreams = get_upstreams()
local infos = iter_add_get_upstream_info(upstreams, nil)
return 200, infos
end
function _M.dump_upstream_info()
local upstreams = get_upstreams()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local upstream_id = uri_segs[4]
local upstream = iter_add_get_upstream_info(upstreams, upstream_id)
if not upstream then
return 404, {error_msg = str_format("upstream[%s] not found", upstream_id)}
end
return 200, upstream
end
function _M.trigger_gc()
-- TODO: find a way to trigger GC in the stream subsystem
collectgarbage()
return 200
end
local function iter_add_get_services_info(values, svc_id)
local infos = {}
for _, svc in core.config_util.iterate_values(values) do
local new_svc = core.table.deepcopy(svc)
if new_svc.value.upstream and new_svc.value.upstream.parent then
new_svc.value.upstream.parent = nil
end
-- remove healthcheck info
new_svc.checker = nil
new_svc.checker_idx = nil
new_svc.checker_upstream = nil
new_svc.clean_handlers = nil
core.table.insert(infos, new_svc)
-- check the service id
if svc_id and svc.value.id == svc_id then
return new_svc
end
end
if not svc_id then
return infos
end
return nil
end
function _M.dump_all_services_info()
local services = get_services()
local infos = iter_add_get_services_info(services, nil)
return 200, infos
end
function _M.dump_service_info()
local services = get_services()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local svc_id = uri_segs[4]
local info = iter_add_get_services_info(services, svc_id)
if not info then
return 404, {error_msg = str_format("service[%s] not found", svc_id)}
end
return 200, info
end
function _M.dump_all_plugin_metadata()
local names = core.config.local_conf().plugins
local metadatas = core.table.new(0, #names)
for _, name in ipairs(names) do
local metadata = plugin.plugin_metadata(name)
if metadata then
core.table.insert(metadatas, metadata.value)
end
end
return 200, metadatas
end
function _M.dump_plugin_metadata()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local name = uri_segs[4]
local metadata = plugin.plugin_metadata(name)
if not metadata then
return 404, {error_msg = str_format("plugin metadata[%s] not found", name)}
end
return 200, metadata.value
end
function _M.post_reload_plugins()
local success, err = events:post(_M.RELOAD_EVENT, ngx.req.get_method(), ngx.time())
if not success then
core.response.exit(503, err)
end
core.response.exit(200, "done")
end
return {
-- /v1/schema
{
methods = {"GET"},
uris = {"/schema"},
handler = _M.schema,
},
-- /v1/healthcheck
{
methods = {"GET"},
uris = {"/healthcheck"},
handler = _M.get_health_checkers,
},
-- /v1/healthcheck/{src_type}/{src_id}
{
methods = {"GET"},
uris = {"/healthcheck/*"},
handler = _M.get_health_checker,
},
-- /v1/gc
{
methods = {"POST"},
uris = {"/gc"},
handler = _M.trigger_gc,
},
-- /v1/routes
{
methods = {"GET"},
uris = {"/routes"},
handler = _M.dump_all_routes_info,
},
-- /v1/route/*
{
methods = {"GET"},
uris = {"/route/*"},
handler = _M.dump_route_info,
},
-- /v1/services
{
methods = {"GET"},
uris = {"/services"},
handler = _M.dump_all_services_info
},
-- /v1/service/*
{
methods = {"GET"},
uris = {"/service/*"},
handler = _M.dump_service_info
},
-- /v1/upstreams
{
methods = {"GET"},
uris = {"/upstreams"},
handler = _M.dump_all_upstreams_info,
},
-- /v1/upstream/*
{
methods = {"GET"},
uris = {"/upstream/*"},
handler = _M.dump_upstream_info,
},
-- /v1/plugin_metadatas
{
methods = {"GET"},
uris = {"/plugin_metadatas"},
handler = _M.dump_all_plugin_metadata,
},
-- /v1/plugin_metadata/*
{
methods = {"GET"},
uris = {"/plugin_metadata/*"},
handler = _M.dump_plugin_metadata,
},
-- /v1/plugins/reload
{
methods = {"PUT"},
uris = {"/plugins/reload"},
handler = _M.post_reload_plugins,
},
get_health_checkers = _get_health_checkers,
reload_event = _M.RELOAD_EVENT,
}
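-- Usage sketch: these routes are served on the control API listener
-- (127.0.0.1:9090 by default), e.g.
--   curl http://127.0.0.1:9090/v1/healthcheck
--   curl -X PUT http://127.0.0.1:9090/v1/plugins/reload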


@@ -0,0 +1,68 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local log = require("apisix.core.log")
local utils = require("apisix.core.utils")
local local_conf, err = require("apisix.core.config_local").local_conf()
if not local_conf then
error("failed to parse yaml config: " .. err)
end
local config_provider = local_conf.deployment and local_conf.deployment.config_provider
or "etcd"
log.info("use config_provider: ", config_provider)
local config
-- Currently, we handle JSON parsing in config_yaml, so special processing is needed here.
if config_provider == "json" then
config = require("apisix.core.config_yaml")
config.file_type = "json"
else
config = require("apisix.core.config_" .. config_provider)
end
config.type = config_provider
return {
version = require("apisix.core.version"),
log = log,
config = config,
config_util = require("apisix.core.config_util"),
sleep = utils.sleep,
json = require("apisix.core.json"),
table = require("apisix.core.table"),
request = require("apisix.core.request"),
response = require("apisix.core.response"),
lrucache = require("apisix.core.lrucache"),
schema = require("apisix.schema_def"),
string = require("apisix.core.string"),
ctx = require("apisix.core.ctx"),
timer = require("apisix.core.timer"),
id = require("apisix.core.id"),
ip = require("apisix.core.ip"),
io = require("apisix.core.io"),
utils = utils,
dns_client = require("apisix.core.dns.client"),
etcd = require("apisix.core.etcd"),
tablepool = require("tablepool"),
resolver = require("apisix.core.resolver"),
os = require("apisix.core.os"),
pubsub = require("apisix.core.pubsub"),
math = require("apisix.core.math"),
event = require("apisix.core.event"),
env = require("apisix.core.env"),
}
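-- Usage sketch: other modules typically pull everything through this aggregate:
--   local core = require("apisix.core")
--   core.log.info("hello from a plugin")
--   local local_conf = core.config.local_conf()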

File diff suppressed because it is too large


@@ -0,0 +1,71 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration information.
--
-- @module core.config_local
local file = require("apisix.cli.file")
local _M = {}
local config_data
function _M.clear_cache()
config_data = nil
end
---
-- Get the local config info.
-- The configuration consists of two parts: user-defined configuration in
-- `conf/config.yaml` and default configuration in `conf/config-default.yaml`.
-- Items of the same name in `conf/config.yaml` override those in
-- `conf/config-default.yaml`; the final configuration is the merge of
-- `conf/config.yaml` with the non-overridden defaults.
--
-- @function core.config_local.local_conf
-- @treturn table The configuration information.
-- @usage
-- -- Given a config item in `conf/config.yaml`:
-- --
-- -- apisix:
-- -- ssl:
-- -- fallback_sni: "a.test2.com"
-- --
-- -- you can get the value of `fallback_sni` by:
-- local local_conf = core.config.local_conf()
-- local fallback_sni = core.table.try_read_attr(
-- local_conf, "apisix", "ssl", "fallback_sni") -- "a.test2.com"
function _M.local_conf(force)
if not force and config_data then
return config_data
end
local default_conf, err = file.read_yaml_conf()
if not default_conf then
return nil, err
end
config_data = default_conf
return config_data
end
return _M


@@ -0,0 +1,219 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Collection of util functions
--
-- @module core.config_util
local core_tab = require("apisix.core.table")
local log = require("apisix.core.log")
local str_byte = string.byte
local str_char = string.char
local ipairs = ipairs
local setmetatable = setmetatable
local tostring = tostring
local type = type
local _M = {}
local function _iterate_values(self, tab)
while true do
self.idx = self.idx + 1
local v = tab[self.idx]
if type(v) == "table" then
return self.idx, v
end
if v == nil then
return nil, nil
end
-- skip the tombstone
end
end
function _M.iterate_values(tab)
local iter = setmetatable({idx = 0}, {__call = _iterate_values})
return iter, tab, 0
end
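-- Usage sketch (as exposed via core.config_util): visit only the table-typed
-- entries, skipping tombstones:
--   for _, item in config_util.iterate_values(values) do
--       -- item is guaranteed to be a table here
--   end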
-- Add a clean handler to a runtime configuration item.
-- The clean handler will be called when the item is deleted from configuration
-- or cancelled. Note that Nginx worker exit doesn't trigger the clean handler.
-- Return an index so that we can cancel it later.
function _M.add_clean_handler(item, func)
if not item.clean_handlers then
return nil, "clean handlers for the item are nil"
end
if not item.clean_handlers._id then
item.clean_handlers._id = 1
end
local id = item.clean_handlers._id
item.clean_handlers._id = item.clean_handlers._id + 1
core_tab.insert(item.clean_handlers, {f = func, id = id})
return id
end
-- cancel a clean handler added by add_clean_handler.
-- If `fire` is true, call the clean handler.
function _M.cancel_clean_handler(item, idx, fire)
local pos, f
-- the number of pending clean handlers is small, so we can cancel them in O(n)
for i, clean_handler in ipairs(item.clean_handlers) do
if clean_handler.id == idx then
pos = i
f = clean_handler.f
break
end
end
if not pos then
log.error("failed to find clean_handler with idx ", idx)
return
end
core_tab.remove(item.clean_handlers, pos)
if not fire then
return
end
if f then
f(item)
else
log.error("The function used to clear the health checker is nil, please check")
end
end
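-- Usage sketch: register a clean handler and later cancel it, firing it at
-- cancellation time (item.clean_handlers must already be a table):
--   local id = _M.add_clean_handler(item, release_resources)
--   _M.cancel_clean_handler(item, id, true)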
-- fire all clean handlers added by add_clean_handler.
function _M.fire_all_clean_handlers(item)
-- When the key is deleted, the item will be set to false.
if not item then
return
end
if not item.clean_handlers then
return
end
for _, clean_handler in ipairs(item.clean_handlers) do
clean_handler.f(item)
end
item.clean_handlers = {}
end
---
-- Convert different time units to seconds as time units.
-- Time intervals can be specified in milliseconds, seconds, minutes, hours, days and so on,
-- using the following suffixes:
-- ms milliseconds
-- s seconds
-- m minutes
-- h hours
-- d days
-- w weeks
-- M months, 30 days
-- y years, 365 days
-- Multiple units can be combined in a single value by specifying them in the order from the most
-- to the least significant, and optionally separated by whitespace.
-- A value without a suffix means seconds.
--
-- @function core.config_util.parse_time_unit
-- @tparam number|string s Strings with time units, e.g. "60m".
-- @treturn number Number of seconds after conversion
-- @usage
-- local seconds = core.config_util.parse_time_unit("60m") -- 3600
function _M.parse_time_unit(s)
local typ = type(s)
if typ == "number" then
return s
end
if typ ~= "string" or #s == 0 then
return nil, "invalid data: " .. tostring(s)
end
local size = 0
local size_in_unit = 0
local step = 60 * 60 * 24 * 365
local with_ms = false
for i = 1, #s do
local scale
local unit = str_byte(s, i)
if unit == 121 then -- y
scale = 60 * 60 * 24 * 365
elseif unit == 77 then -- M
scale = 60 * 60 * 24 * 30
elseif unit == 119 then -- w
scale = 60 * 60 * 24 * 7
elseif unit == 100 then -- d
scale = 60 * 60 * 24
elseif unit == 104 then -- h
scale = 60 * 60
elseif unit == 109 then -- m
unit = str_byte(s, i + 1)
if unit == 115 then -- ms
size = size * 1000
with_ms = true
step = 0
break
end
scale = 60
elseif unit == 115 then -- s
scale = 1
elseif 48 <= unit and unit <= 57 then
size_in_unit = size_in_unit * 10 + unit - 48
elseif unit ~= 32 then
return nil, "invalid data: " .. str_char(unit)
end
if scale ~= nil then
if scale > step then
return nil, "unexpected unit: " .. str_char(unit)
end
step = scale
size = size + scale * size_in_unit
size_in_unit = 0
end
end
if size_in_unit > 0 then
if step == 1 then
return nil, "specific unit conflicts with the default unit second"
end
size = size + size_in_unit
end
if with_ms then
size = size / 1000
end
return size
end
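-- Further sketches of combined and sub-second units:
--   _M.parse_time_unit("1h 30m") --> 5400
--   _M.parse_time_unit("500ms")  --> 0.5
--   _M.parse_time_unit(10)       --> 10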
return _M


@@ -0,0 +1,378 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration from ngx.shared.DICT
--
-- @module core.config_xds
local config_local = require("apisix.core.config_local")
local config_util = require("apisix.core.config_util")
local string = require("apisix.core.string")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local os = require("apisix.core.os")
local ngx_sleep = require("apisix.core.utils").sleep
local check_schema = require("apisix.core.schema").check
local new_tab = require("table.new")
local table = table
local insert_tab = table.insert
local error = error
local pcall = pcall
local tostring = tostring
local setmetatable = setmetatable
local io = io
local io_open = io.open
local io_close = io.close
local package = package
local ipairs = ipairs
local type = type
local sub_str = string.sub
local ffi = require ("ffi")
local C = ffi.C
local config = ngx.shared["xds-config"]
local conf_ver = ngx.shared["xds-config-version"]
local is_http = ngx.config.subsystem == "http"
local ngx_re_match = ngx.re.match
local ngx_re_gmatch = ngx.re.gmatch
local ngx_timer_every = ngx.timer.every
local ngx_timer_at = ngx.timer.at
local exiting = ngx.worker.exiting
local ngx_time = ngx.time
local xds_lib_name = "libxds.so"
local process
if is_http then
process = require("ngx.process")
end
local shdict_udata_to_zone
if not pcall(function() return C.ngx_http_lua_ffi_shdict_udata_to_zone end) then
shdict_udata_to_zone = C.ngx_meta_lua_ffi_shdict_udata_to_zone
else
shdict_udata_to_zone = C.ngx_http_lua_ffi_shdict_udata_to_zone
end
ffi.cdef[[
extern void initial(void* config_zone, void* version_zone);
]]
local created_obj = {}
local _M = {
version = 0.1,
local_conf = config_local.local_conf,
}
local mt = {
__index = _M,
__tostring = function(self)
return " xds key: " .. self.key
end
}
-- todo: refactor this function in chash.lua and radixtree.lua
local function load_shared_lib(lib_name)
local cpath = package.cpath
local tried_paths = new_tab(32, 0)
local i = 1
local iter, err = ngx_re_gmatch(cpath, "[^;]+", "jo")
if not iter then
error("failed to gmatch: " .. err)
end
while true do
local it = iter()
local fpath
fpath, err = ngx_re_match(it[0], "(.*/)", "jo")
if err then
error("failed to match: " .. err)
end
local spath = fpath[0] .. lib_name
local f = io_open(spath)
if f ~= nil then
io_close(f)
return ffi.load(spath)
end
tried_paths[i] = spath
i = i + 1
if not it then
break
end
end
return nil, tried_paths
end
local function load_libxds(lib_name)
local xdsagent, tried_paths = load_shared_lib(lib_name)
if not xdsagent then
tried_paths[#tried_paths + 1] = 'tried above paths but can not load ' .. lib_name
error("can not load xds library, tried paths: " ..
table.concat(tried_paths, '\r\n', 1, #tried_paths))
end
local config_zone = shdict_udata_to_zone(config[1])
local config_shd_cdata = ffi.cast("void*", config_zone)
local conf_ver_zone = shdict_udata_to_zone(conf_ver[1])
local conf_ver_shd_cdata = ffi.cast("void*", conf_ver_zone)
xdsagent.initial(config_shd_cdata, conf_ver_shd_cdata)
end
local latest_version
local function sync_data(self)
if self.conf_version == latest_version then
return true
end
if self.values then
for _, val in ipairs(self.values) do
config_util.fire_all_clean_handlers(val)
end
self.values = nil
self.values_hash = nil
end
local keys = config:get_keys(0)
if not keys or #keys <= 0 then
-- xds did not write any data to shdict
return false, "no keys"
end
self.values = new_tab(#keys, 0)
self.values_hash = new_tab(0, #keys)
for _, key in ipairs(keys) do
if string.has_prefix(key, self.key) then
local data_valid = true
local conf_str = config:get(key, 0)
local conf, err = json.decode(conf_str)
if not conf then
data_valid = false
log.error("decode the conf of [", key, "] failed, err: ", err,
", conf_str: ", conf_str)
end
if not self.single_item and type(conf) ~= "table" then
data_valid = false
log.error("invalid conf of [", key, "], conf: ", conf,
", it should be an object")
end
if data_valid and self.item_schema then
local ok, err = check_schema(self.item_schema, conf)
if not ok then
data_valid = false
log.error("failed to check the conf of [", key, "] err:", err)
end
end
if data_valid and self.checker then
local ok, err = self.checker(conf)
if not ok then
data_valid = false
log.error("failed to check the conf of [", key, "] err:", err)
end
end
if data_valid then
if not conf.id then
conf.id = sub_str(key, #self.key + 2, #key + 1)
log.warn("the id of [", key, "] is nil, use the id: ", conf.id)
end
local conf_item = {value = conf, modifiedIndex = latest_version,
key = key}
insert_tab(self.values, conf_item)
self.values_hash[conf.id] = #self.values
conf_item.clean_handlers = {}
if self.filter then
self.filter(conf_item)
end
end
end
end
self.conf_version = latest_version
return true
end
local function _automatic_fetch(premature, self)
if premature then
return
end
local i = 0
while not exiting() and self.running and i <= 32 do
i = i + 1
local ok, ok2, err = pcall(sync_data, self)
if not ok then
err = ok2
log.error("failed to fetch data from xds: ",
err, ", ", tostring(self))
ngx_sleep(3)
break
elseif not ok2 and err then
-- todo: handle other errors
if err ~= "wait for more time" and err ~= "no keys" and self.last_err ~= err then
log.error("failed to fetch data from xds, ", err, ", ", tostring(self))
end
if err ~= self.last_err then
self.last_err = err
self.last_err_time = ngx_time()
else
if ngx_time() - self.last_err_time >= 30 then
self.last_err = nil
end
end
ngx_sleep(0.5)
elseif not ok2 then
ngx_sleep(0.05)
else
ngx_sleep(0.1)
end
end
if not exiting() and self.running then
ngx_timer_at(0, _automatic_fetch, self)
end
end
local function fetch_version(premature)
if premature then
return
end
local version = conf_ver:get("version")
if not version then
return
end
if version ~= latest_version then
latest_version = version
end
end
function _M.new(key, opts)
local automatic = opts and opts.automatic
local item_schema = opts and opts.item_schema
local filter_fun = opts and opts.filter
local single_item = opts and opts.single_item
local checker = opts and opts.checker
local obj = setmetatable({
automatic = automatic,
item_schema = item_schema,
checker = checker,
sync_times = 0,
running = true,
conf_version = 0,
values = nil,
routes_hash = nil,
prev_index = nil,
last_err = nil,
last_err_time = nil,
key = key,
single_item = single_item,
filter = filter_fun,
}, mt)
if automatic then
if not key then
return nil, "missing `key` argument"
end
-- block until xds completes the initial configuration
while true do
os.usleep(1000)
fetch_version()
if latest_version then
break
end
end
local ok, ok2, err = pcall(sync_data, obj)
if not ok then
err = ok2
end
if err then
log.error("failed to fetch data from xds ",
err, ", ", key)
end
ngx_timer_at(0, _automatic_fetch, obj)
end
if key then
created_obj[key] = obj
end
return obj
end
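-- Usage sketch (mirrors core.config.new for the xds provider; the key is
-- illustrative):
--   local routes, err = _M.new("/routes", {automatic = true})
--   local route = routes and routes:get("1")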
function _M.get(self, key)
if not self.values_hash then
return
end
local arr_idx = self.values_hash[tostring(key)]
if not arr_idx then
return nil
end
return self.values[arr_idx]
end
function _M.fetch_created_obj(key)
return created_obj[key]
end
function _M.init_worker()
if process.type() == "privileged agent" then
load_libxds(xds_lib_name)
end
ngx_timer_every(1, fetch_version)
return true
end
return _M


@@ -0,0 +1,579 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration information in Stand-alone mode.
--
-- @module core.config_yaml
local config_local = require("apisix.core.config_local")
local config_util = require("apisix.core.config_util")
local yaml = require("lyaml")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local new_tab = require("table.new")
local check_schema = require("apisix.core.schema").check
local profile = require("apisix.core.profile")
local lfs = require("lfs")
local file = require("apisix.cli.file")
local exiting = ngx.worker.exiting
local insert_tab = table.insert
local type = type
local ipairs = ipairs
local setmetatable = setmetatable
local ngx_sleep = require("apisix.core.utils").sleep
local ngx_timer_at = ngx.timer.at
local ngx_time = ngx.time
local ngx_shared = ngx.shared
local sub_str = string.sub
local tostring = tostring
local pcall = pcall
local io = io
local ngx = ngx
local re_find = ngx.re.find
local process = require("ngx.process")
local worker_id = ngx.worker.id
local created_obj = {}
local shared_dict
local status_report_shared_dict_name = "status-report"
local _M = {
version = 0.2,
local_conf = config_local.local_conf,
clear_local_cache = config_local.clear_cache,
-- yaml or json
file_type = "yaml",
ERR_NO_SHARED_DICT = "failed to prepare the standalone config shared dict; this will degrade "..
"to event broadcasting, and if a worker crashes, the configuration "..
"cannot be restored from the other workers or the shared dict"
}
local mt = {
__index = _M,
__tostring = function(self)
return "apisix.yaml key: " .. (self.key or "")
end
}
local apisix_yaml
local apisix_yaml_mtime
local config_yaml = {
path = profile:yaml_path("apisix"),
type = "yaml",
parse = function(self)
local f, err = io.open(self.path, "r")
if not f then
return nil, "failed to open file " .. self.path .. " : " .. err
end
f:seek('end', -10)
local end_flag = f:read("*a")
local found_end_flag = re_find(end_flag, [[#END\s*$]], "jo")
if not found_end_flag then
f:close()
return nil, "missing valid end flag in file " .. self.path
end
f:seek('set')
local raw_config = f:read("*a")
f:close()
return yaml.load(raw_config), nil
end
}
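-- Note the parser contract above: the standalone config file must end with an
-- "#END" marker within its last 10 bytes, e.g.
--   routes:
--     - uri: /hello
--       upstream: {nodes: {"127.0.0.1:1980": 1}, type: roundrobin}
--   #END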
local config_json = {
-- `sub(1, -5)` strips the trailing "yaml" so it can be replaced with "json"
path = config_yaml.path:sub(1, -5) .. "json",
type = "json",
parse = function(self)
local f, err = io.open(self.path, "r")
if not f then
return nil, "failed to open file " .. self.path .. " : " .. err
end
local raw_config = f:read("*a")
f:close()
local config, err = json.decode(raw_config)
if err then
return nil, "failed to decode json: " .. err
end
return config, nil
end
}
local config_file_table = {
yaml = config_yaml,
json = config_json
}
local config_file = setmetatable({}, {
__index = function(_, key)
return config_file_table[_M.file_type][key]
end
})
local function sync_status_to_shdict(status)
if process.type() ~= "worker" then
return
end
local status_shdict = ngx.shared[status_report_shared_dict_name]
if not status_shdict then
return
end
local id = worker_id()
log.info("sync status to shared dict, id: ", id, " status: ", status)
status_shdict:set(id, status)
end
local function update_config(table, conf_version)
if not table then
log.error("failed update config: empty table")
return
end
local ok, err = file.resolve_conf_var(table)
if not ok then
log.error("failed to resolve variables:" .. err)
return
end
apisix_yaml = table
sync_status_to_shdict(true)
apisix_yaml_mtime = conf_version
end
_M._update_config = update_config
local function is_use_admin_api()
local local_conf, _ = config_local.local_conf()
return local_conf and local_conf.apisix and local_conf.apisix.enable_admin
end
local function read_apisix_config(premature, pre_mtime)
if premature then
return
end
local attributes, err = lfs.attributes(config_file.path)
if not attributes then
log.error("failed to fetch ", config_file.path, " attributes: ", err)
return
end
local last_modification_time = attributes.modification
if apisix_yaml_mtime == last_modification_time then
return
end
local config_new, err = config_file:parse()
if err then
log.error("failed to parse the content of file ", config_file.path, ": ", err)
return
end
update_config(config_new, last_modification_time)
log.warn("config file ", config_file.path, " reloaded.")
end
local function sync_data(self)
if not self.key then
return nil, "missing 'key' arguments"
end
local conf_version
if is_use_admin_api() then
conf_version = apisix_yaml[self.conf_version_key] or 0
else
if not apisix_yaml_mtime then
log.warn("wait for more time")
return nil, "failed to read local file " .. config_file.path
end
conf_version = apisix_yaml_mtime
end
if not conf_version or conf_version == self.conf_version then
return true
end
local items = apisix_yaml[self.key]
if not items then
self.values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
self.conf_version = conf_version
return true
end
if self.values and #self.values > 0 then
if is_use_admin_api() then
-- filter self.values to retain only those whose IDs exist in the new items list.
local exist_values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
local exist_items = {}
for _, item in ipairs(items) do
exist_items[tostring(item.id)] = true
end
-- remove objects that exist in the self.values but do not exist in the new items.
-- for removed items, trigger cleanup handlers.
for _, item in ipairs(self.values) do
local id = item.value.id
if not exist_items[id] then
config_util.fire_all_clean_handlers(item)
else
insert_tab(exist_values, item)
self.values_hash[id] = #exist_values
end
end
self.values = exist_values
else
for _, item in ipairs(self.values) do
config_util.fire_all_clean_handlers(item)
end
self.values = nil
end
end
if self.single_item then
-- treat items as a single item
self.values = new_tab(1, 0)
self.values_hash = new_tab(0, 1)
local item = items
local modifiedIndex = item.modifiedIndex or conf_version
local conf_item = {value = item, modifiedIndex = modifiedIndex,
key = "/" .. self.key}
local data_valid = true
local err
if self.item_schema then
data_valid, err = check_schema(self.item_schema, item)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
if data_valid and self.checker then
-- TODO: An opts table should be used
-- as different checkers may use different parameters
data_valid, err = self.checker(item, conf_item.key)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
end
if data_valid then
insert_tab(self.values, conf_item)
self.values_hash[self.key] = #self.values
conf_item.clean_handlers = {}
if self.filter then
self.filter(conf_item)
end
end
else
if not self.values then
self.values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
end
local err
for i, item in ipairs(items) do
local idx = tostring(i)
local data_valid = true
if type(item) ~= "table" then
data_valid = false
log.error("invalid item data of [", self.key .. "/" .. idx,
"], val: ", json.delay_encode(item),
", it should be an object")
end
local id = item.id or item.username or ("arr_" .. idx)
local modifiedIndex = item.modifiedIndex or conf_version
local conf_item = {value = item, modifiedIndex = modifiedIndex,
key = "/" .. self.key .. "/" .. id}
if data_valid and self.item_schema then
data_valid, err = check_schema(self.item_schema, item)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
if data_valid and self.checker then
data_valid, err = self.checker(item, conf_item.key)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
if data_valid then
local item_id = tostring(id)
local pre_index = self.values_hash[item_id]
if pre_index then
-- remove the old item
local pre_val = self.values[pre_index]
if pre_val and
(not item.modifiedIndex or pre_val.modifiedIndex ~= item.modifiedIndex) then
config_util.fire_all_clean_handlers(pre_val)
self.values[pre_index] = conf_item
conf_item.value.id = item_id
conf_item.clean_handlers = {}
end
else
insert_tab(self.values, conf_item)
self.values_hash[item_id] = #self.values
conf_item.value.id = item_id
conf_item.clean_handlers = {}
end
if self.filter then
self.filter(conf_item)
end
end
end
end
self.conf_version = conf_version
return true
end
function _M.get(self, key)
if not self.values_hash then
return
end
local arr_idx = self.values_hash[tostring(key)]
if not arr_idx then
return nil
end
return self.values[arr_idx]
end
local function _automatic_fetch(premature, self)
if premature then
return
end
-- _automatic_fetch is only called from a timer, and according to the
-- documentation, ngx.shared.DICT.get may be executed there.
-- If this module's variables have not been assigned values yet, the worker
-- has not been initialized, so try to read any old data that may be present
-- in the shared dict. Load from the shared dict only on first startup;
-- afterwards the event mechanism keeps the config in sync.
if is_use_admin_api() and not shared_dict then
log.info("try to load config from shared dict")
local config, err
shared_dict = ngx_shared["standalone-config"] -- init shared dict in current worker
if not shared_dict then
log.error("failed to read config from shared dict: shared dict not found")
goto SKIP_SHARED_DICT
end
config, err = shared_dict:get("config")
if not config then
if err then -- if the key does not exist, the return values are both nil
log.error("failed to read config from shared dict: ", err)
end
log.info("no config found in shared dict")
goto SKIP_SHARED_DICT
end
log.info("startup config loaded from shared dict: ", config)
config, err = json.decode(tostring(config))
if not config then
log.error("failed to decode config from shared dict: ", err)
goto SKIP_SHARED_DICT
end
_M._update_config(config)
log.info("config loaded from shared dict")
::SKIP_SHARED_DICT::
if not shared_dict then
log.crit(_M.ERR_NO_SHARED_DICT)
-- set a sentinel value so this worker will not try to read from the shared dict again
shared_dict = "error"
end
end
local i = 0
while not exiting() and self.running and i <= 32 do
i = i + 1
local ok, ok2, err = pcall(sync_data, self)
if not ok then
err = ok2
log.error("failed to fetch data from local file " .. config_file.path .. ": ",
err, ", ", tostring(self))
ngx_sleep(3)
break
elseif not ok2 and err then
if err ~= "timeout" and err ~= "Key not found"
and self.last_err ~= err then
log.error("failed to fetch data from local file " .. config_file.path .. ": ",
err, ", ", tostring(self))
end
if err ~= self.last_err then
self.last_err = err
self.last_err_time = ngx_time()
else
if ngx_time() - self.last_err_time >= 30 then
self.last_err = nil
end
end
ngx_sleep(0.5)
elseif not ok2 then
ngx_sleep(0.05)
else
ngx_sleep(0.1)
end
end
if not exiting() and self.running then
ngx_timer_at(0, _automatic_fetch, self)
end
end
function _M.new(key, opts)
local local_conf, err = config_local.local_conf()
if not local_conf then
return nil, err
end
local automatic = opts and opts.automatic
local item_schema = opts and opts.item_schema
local filter_fun = opts and opts.filter
local single_item = opts and opts.single_item
local checker = opts and opts.checker
-- keys look like /routes and /upstreams; remove the first char `/`
if key then
key = sub_str(key, 2)
end
local obj = setmetatable({
automatic = automatic,
item_schema = item_schema,
checker = checker,
sync_times = 0,
running = true,
conf_version = 0,
values = nil,
routes_hash = nil,
prev_index = nil,
last_err = nil,
last_err_time = nil,
key = key,
conf_version_key = key and key .. "_conf_version",
single_item = single_item,
filter = filter_fun,
}, mt)
if automatic then
if not key then
return nil, "missing `key` argument"
end
local ok, ok2, err = pcall(sync_data, obj)
if not ok then
err = ok2
end
if err then
log.error("failed to fetch data from local file ", config_file.path, ": ",
err, ", ", key)
end
ngx_timer_at(0, _automatic_fetch, obj)
end
if key then
created_obj[key] = obj
end
return obj
end
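-- A sketch of a typical caller (the key and options mirror the arguments
-- handled above; the schema and checker options are omitted for brevity):
--
--   local routes, err = _M.new("/routes", {automatic = true})
--   if routes then
--       local route = routes:get("1")  -- looked up via values_hash by id
--   end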
function _M.close(self)
self.running = false
end
function _M.server_version(self)
return "apisix.yaml " .. _M.version
end
function _M.fetch_created_obj(key)
return created_obj[sub_str(key, 2)]
end
function _M.fetch_all_created_obj()
return created_obj
end
function _M.init()
if is_use_admin_api() then
return true
end
read_apisix_config()
return true
end
function _M.init_worker()
sync_status_to_shdict(false)
if is_use_admin_api() then
apisix_yaml = {}
apisix_yaml_mtime = 0
return true
end
-- sync data in each non-master process
ngx.timer.every(1, read_apisix_config)
return true
end
return _M


@@ -0,0 +1,463 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Define the request context.
--
-- @module core.ctx
local core_str = require("apisix.core.string")
local core_tab = require("apisix.core.table")
local request = require("apisix.core.request")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local config_local = require("apisix.core.config_local")
local tablepool = require("tablepool")
local get_var = require("resty.ngxvar").fetch
local get_request = require("resty.ngxvar").request
local ck = require "resty.cookie"
local multipart = require("multipart")
local util = require("apisix.cli.util")
local gq_parse = require("graphql").parse
local jp = require("jsonpath")
local setmetatable = setmetatable
local sub_str = string.sub
local ngx = ngx
local ngx_var = ngx.var
local re_gsub = ngx.re.gsub
local ipairs = ipairs
local type = type
local error = error
local pcall = pcall
local _M = {version = 0.2}
local GRAPHQL_DEFAULT_MAX_SIZE = 1048576 -- 1MiB
local GRAPHQL_REQ_DATA_KEY = "query"
local GRAPHQL_REQ_METHOD_HTTP_GET = "GET"
local GRAPHQL_REQ_METHOD_HTTP_POST = "POST"
local GRAPHQL_REQ_MIME_JSON = "application/json"
local fetch_graphql_data = {
[GRAPHQL_REQ_METHOD_HTTP_GET] = function(ctx, max_size)
local body = request.get_uri_args(ctx)[GRAPHQL_REQ_DATA_KEY]
if not body then
return nil, "failed to read graphql data, args[" ..
GRAPHQL_REQ_DATA_KEY .. "] is nil"
end
if type(body) == "table" then
body = body[1]
end
return body
end,
[GRAPHQL_REQ_METHOD_HTTP_POST] = function(ctx, max_size)
local body, err = request.get_body(max_size, ctx)
if not body then
return nil, "failed to read graphql data, " .. (err or "request body has zero size")
end
if request.header(ctx, "Content-Type") == GRAPHQL_REQ_MIME_JSON then
local res
res, err = json.decode(body)
if not res then
return nil, "failed to read graphql data, " .. err
end
if not res[GRAPHQL_REQ_DATA_KEY] then
return nil, "failed to read graphql data, json body[" ..
GRAPHQL_REQ_DATA_KEY .. "] is nil"
end
body = res[GRAPHQL_REQ_DATA_KEY]
end
return body
end
}
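-- Both handlers above expect the GraphQL document under the "query" key,
-- e.g. (illustrative requests):
--
--   GET  /anything?query={hello}
--   POST /anything  with Content-Type: application/json
--                   and body {"query": "{hello}"}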
local function parse_graphql(ctx)
local local_conf, err = config_local.local_conf()
if not local_conf then
return nil, "failed to get local conf: " .. err
end
local max_size = GRAPHQL_DEFAULT_MAX_SIZE
local size = core_tab.try_read_attr(local_conf, "graphql", "max_size")
if size then
max_size = size
end
local method = request.get_method()
local func = fetch_graphql_data[method]
if not func then
return nil, "graphql not support `" .. method .. "` request"
end
local body
body, err = func(ctx, max_size)
if not body then
return nil, err
end
local ok, res = pcall(gq_parse, body)
if not ok then
return nil, "failed to parse graphql: " .. res .. " body: " .. body
end
if #res.definitions == 0 then
return nil, "empty graphql: " .. body
end
return res
end
local function get_parsed_graphql()
local ctx = ngx.ctx.api_ctx
if ctx._graphql then
return ctx._graphql
end
local res, err = parse_graphql(ctx)
if not res then
log.error(err)
ctx._graphql = {}
return ctx._graphql
end
if #res.definitions > 1 then
log.warn("Multiple operations are not supported.",
"Only the first one is handled")
end
local def = res.definitions[1]
local fields = def.selectionSet.selections
local root_fields = core_tab.new(#fields, 0)
for i, f in ipairs(fields) do
root_fields[i] = f.name.value
end
local name = ""
if def.name and def.name.value then
name = def.name.value
end
ctx._graphql = {
name = name,
operation = def.operation,
root_fields = root_fields,
}
return ctx._graphql
end
local CONTENT_TYPE_JSON = "application/json"
local CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
local CONTENT_TYPE_MULTIPART_FORM = "multipart/form-data"
local function get_parsed_request_body(ctx)
local ct_header = request.header(ctx, "Content-Type") or ""
if core_str.find(ct_header, CONTENT_TYPE_JSON) then
local request_table, err = request.get_json_request_body_table()
if not request_table then
return nil, "failed to parse JSON body: " .. err
end
return request_table
end
if core_str.find(ct_header, CONTENT_TYPE_FORM_URLENCODED) then
local args, err = request.get_post_args()
if not args then
return nil, "failed to parse form data: " .. (err or "unknown error")
end
return args
end
if core_str.find(ct_header, CONTENT_TYPE_MULTIPART_FORM) then
local body = request.get_body()
local res = multipart(body, ct_header)
if not res then
return nil, "failed to parse multipart form data"
end
return res:get_all()
end
local err = "unsupported content-type in header: " .. ct_header ..
", supported types are: " ..
CONTENT_TYPE_JSON .. ", " ..
CONTENT_TYPE_FORM_URLENCODED .. ", " ..
CONTENT_TYPE_MULTIPART_FORM
return nil, err
end
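-- A sketch of how this feeds the `post_arg.` variables resolved further down:
-- a request with Content-Type: application/json and body
-- {"user": {"name": "alice"}} parses into a table here, so
-- ctx.var["post_arg.user.name"] resolves to "alice" (names illustrative).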
do
local var_methods = {
method = ngx.req.get_method,
cookie = function ()
if ngx.var.http_cookie then
return ck:new()
end
end
}
local no_cacheable_var_names = {
-- var.args should not be cached as it can be changed via set_uri_args
args = true,
is_args = true,
}
local ngx_var_names = {
upstream_scheme = true,
upstream_host = true,
upstream_upgrade = true,
upstream_connection = true,
upstream_uri = true,
upstream_mirror_host = true,
upstream_mirror_uri = true,
upstream_cache_zone = true,
upstream_cache_zone_info = true,
upstream_no_cache = true,
upstream_cache_key = true,
upstream_cache_bypass = true,
var_x_forwarded_proto = true,
var_x_forwarded_port = true,
var_x_forwarded_host = true,
}
-- sorted in alphabetical order
local apisix_var_names = {
balancer_ip = true,
balancer_port = true,
consumer_group_id = true,
consumer_name = true,
resp_body = function(ctx)
-- only for logger and requires the logger to have a special configuration
return ctx.resp_body or ''
end,
route_id = true,
route_name = true,
service_id = true,
service_name = true,
}
local mt = {
__index = function(t, key)
local cached = t._cache[key]
if cached ~= nil then
log.debug("serving ctx value from cache for key: ", key)
return cached
end
if type(key) ~= "string" then
error("invalid argument, expect string value", 2)
end
local val
local method = var_methods[key]
if method then
val = method()
elseif core_str.has_prefix(key, "cookie_") then
local cookie = t.cookie
if cookie then
local err
val, err = cookie:get(sub_str(key, 8))
if err then
log.warn("failed to fetch cookie value by key: ",
key, " error: ", err)
end
end
elseif core_str.has_prefix(key, "arg_") then
local arg_key = sub_str(key, 5)
local args = request.get_uri_args()[arg_key]
if args then
if type(args) == "table" then
val = args[1]
else
val = args
end
end
elseif core_str.has_prefix(key, "post_arg_") then
-- only matches the default POST form encoding
local content_type = request.header(nil, "Content-Type")
if content_type ~= nil and core_str.has_prefix(content_type,
"application/x-www-form-urlencoded") then
local arg_key = sub_str(key, 10)
local args = request.get_post_args()[arg_key]
if args then
if type(args) == "table" then
val = args[1]
else
val = args
end
end
end
elseif core_str.has_prefix(key, "uri_param_") then
-- `uri_param_<name>` provides access to the uri parameters when using
-- radixtree_uri_with_parameter
if t._ctx.curr_req_matched then
local arg_key = sub_str(key, 11)
val = t._ctx.curr_req_matched[arg_key]
end
elseif core_str.has_prefix(key, "http_") then
local arg_key = key:lower()
arg_key = re_gsub(arg_key, "-", "_", "jo")
val = get_var(arg_key, t._request)
elseif core_str.has_prefix(key, "graphql_") then
-- trim the "graphql_" prefix
local arg_key = sub_str(key, 9)
val = get_parsed_graphql()[arg_key]
elseif core_str.has_prefix(key, "post_arg.") then
-- trim the "post_arg." prefix (10 characters)
local arg_key = sub_str(key, 10)
local parsed_body, err = get_parsed_request_body(t._ctx)
if not parsed_body then
log.warn("failed to fetch post args value by key: ", arg_key, " error: ", err)
return nil
end
if arg_key:find("[%[%*]") or arg_key:find("..", 1, true) then
arg_key = "$." .. arg_key
local results = jp.query(parsed_body, arg_key)
if #results == 0 then
val = nil
else
val = results
end
else
local parts = util.split(arg_key, "(.)")
local current = parsed_body
for _, part in ipairs(parts) do
if type(current) ~= "table" then
current = nil
break
end
current = current[part]
end
val = current
end
else
local getter = apisix_var_names[key]
if getter then
local ctx = t._ctx
if getter == true then
val = ctx and ctx[key]
else
-- the getter is registered by ctx.register_var
val = getter(ctx)
end
else
val = get_var(key, t._request)
end
end
if val ~= nil and not no_cacheable_var_names[key] then
t._cache[key] = val
end
return val
end,
__newindex = function(t, key, val)
if ngx_var_names[key] then
ngx_var[key] = val
end
-- log.info("key: ", key, " new val: ", val)
t._cache[key] = val
end,
}
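-- Illustrative reads through the metatable above (a sketch; `api_ctx` is a
-- context prepared by set_vars_meta() below):
--
--   api_ctx.var.cookie_session     -- value of the "session" cookie
--   api_ctx.var.arg_page           -- first value of the "page" uri argument
--   api_ctx.var.http_user_agent    -- the User-Agent request header
--   api_ctx.var.graphql_operation  -- operation type of the parsed GraphQL body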
---
-- Register custom variables.
-- Register variables globally, and use them as normal builtin variables.
-- Note that the custom variables can't be used in features that depend
-- on the Nginx directive, like `access_log_format`.
--
-- @function core.ctx.register_var
-- @tparam string name custom variable name
-- @tparam function getter The fetch function for custom variables.
-- @tparam table opts An optional options table which controls the behavior of the variable
-- @usage
-- local core = require "apisix.core"
--
-- core.ctx.register_var("a6_labels_zone", function(ctx)
-- local route = ctx.matched_route and ctx.matched_route.value
-- if route and route.labels then
-- return route.labels.zone
-- end
-- return nil
-- end)
--
-- The following options are supported in `opts`:
-- * no_cacheable: whether the result of the getter is cacheable. Defaults to `false`.
function _M.register_var(name, getter, opts)
if type(getter) ~= "function" then
error("the getter of registered var should be a function")
end
apisix_var_names[name] = getter
if opts then
if opts.no_cacheable then
no_cacheable_var_names[name] = true
end
end
end
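-- e.g. registering a variable whose value must not be cached between reads
-- (a sketch; the variable name and getter body are illustrative):
--
--   core.ctx.register_var("a6_now_ms", function(ctx)
--       return ngx.now() * 1000
--   end, {no_cacheable = true})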
function _M.set_vars_meta(ctx)
local var = tablepool.fetch("ctx_var", 0, 32)
if not var._cache then
var._cache = {}
end
var._request = get_request()
var._ctx = ctx
setmetatable(var, mt)
ctx.var = var
end
function _M.release_vars(ctx)
if ctx.var == nil then
return
end
core_tab.clear(ctx.var._cache)
tablepool.release("ctx_var", ctx.var, true)
ctx.var = nil
end
end -- do
return _M


@@ -0,0 +1,164 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped dns search client.
--
-- @module core.dns.client
local require = require
local config_local = require("apisix.core.config_local")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local table = require("apisix.core.table")
local gcd = require("apisix.core.math").gcd
local insert_tab = table.insert
local math_random = math.random
local package_loaded = package.loaded
local ipairs = ipairs
local table_remove = table.remove
local setmetatable = setmetatable
local _M = {
RETURN_RANDOM = 1,
RETURN_ALL = 2,
}
local function resolve_srv(client, answers)
if #answers == 0 then
return nil, "empty SRV record"
end
local resolved_answers = {}
local answer_to_count = {}
for _, answer in ipairs(answers) do
if answer.type ~= client.TYPE_SRV then
return nil, "mess SRV with other record"
end
local resolved, err = client.resolve(answer.target)
if not resolved then
local msg = "failed to resolve SRV record " .. answer.target .. ": " .. err
return nil, msg
end
log.info("dns resolve SRV ", answer.target, ", result: ",
json.delay_encode(resolved))
local weight = answer.weight
if weight == 0 then
weight = 1
end
local count = #resolved
answer_to_count[answer] = count
-- one target may have multiple resolved results
for _, res in ipairs(resolved) do
local copy = table.deepcopy(res)
copy.weight = weight / count
copy.port = answer.port
copy.priority = answer.priority
insert_tab(resolved_answers, copy)
end
end
-- find the least common multiple of the counts
local lcm = answer_to_count[answers[1]]
for i = 2, #answers do
local count = answer_to_count[answers[i]]
lcm = count * lcm / gcd(count, lcm)
end
-- scale the weights so each becomes an integer
for _, res in ipairs(resolved_answers) do
res.weight = res.weight * lcm
end
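-- Worked example: a target with weight w expanding to 2 addresses and another
-- expanding to 3 get per-copy weights w/2 and w/3; scaling by lcm(2, 3) = 6
-- yields the integers 3w and 2w, which preserves the original ratio.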
return resolved_answers
end
function _M.resolve(self, domain, selector)
local client = self.client
-- this function will dereference the CNAME records
local answers, err = client.resolve(domain)
if not answers then
return nil, "failed to query the DNS server: " .. err
end
if answers.errcode then
return nil, "server returned error code: " .. answers.errcode
.. ": " .. answers.errstr
end
if selector == _M.RETURN_ALL then
log.info("dns resolve ", domain, ", result: ", json.delay_encode(answers))
for _, answer in ipairs(answers) do
if answer.type == client.TYPE_SRV then
return resolve_srv(client, answers)
end
end
return table.deepcopy(answers)
end
local idx = math_random(1, #answers)
local answer = answers[idx]
local dns_type = answer.type
if dns_type == client.TYPE_A or dns_type == client.TYPE_AAAA then
log.info("dns resolve ", domain, ", result: ", json.delay_encode(answer))
return table.deepcopy(answer)
end
return nil, "unsupported DNS answer"
end
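-- A usage sketch, assuming `client` was built with _M.new below:
--
--   local one = client:resolve("example.com")                 -- one random A/AAAA answer
--   local all = client:resolve("example.com", _M.RETURN_ALL)  -- full list; SRV records expanded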
function _M.new(opts)
local local_conf = config_local.local_conf()
if opts.enable_ipv6 == nil then
opts.enable_ipv6 = local_conf.apisix.enable_ipv6
end
-- remove AAAA from the query order when IPv6 is disabled, so the resolver returns an error instead of unusable IPv6 records
if not opts.enable_ipv6 then
for i, v in ipairs(opts.order) do
if v == "AAAA" then
table_remove(opts.order, i)
break
end
end
end
opts.timeout = 2000 -- 2 sec
opts.retrans = 5 -- 5 retransmissions on receive timeout
-- reload the module so each client gets its own instance, since resty.dns.client keeps module-level state
package_loaded["resty.dns.client"] = nil
local dns_client_mod = require("resty.dns.client")
local ok, err = dns_client_mod.init(opts)
if not ok then
return nil, "failed to init the dns client: " .. err
end
return setmetatable({client = dns_client_mod}, {__index = _M})
end
return _M


@@ -0,0 +1,109 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ffi = require "ffi"
local json = require("apisix.core.json")
local log = require("apisix.core.log")
local string = require("apisix.core.string")
local os = os
local type = type
local upper = string.upper
local find = string.find
local sub = string.sub
local str = ffi.string
local ENV_PREFIX = "$ENV://"
local _M = {
PREFIX = ENV_PREFIX
}
local apisix_env_vars = {}
ffi.cdef [[
extern char **environ;
]]
function _M.init()
local e = ffi.C.environ
if not e then
log.warn("could not access environment variables")
return
end
local i = 0
while e[i] ~= nil do
local var = str(e[i])
local p = find(var, "=")
if p then
apisix_env_vars[sub(var, 1, p - 1)] = sub(var, p + 1)
end
i = i + 1
end
end
local function parse_env_uri(env_uri)
-- Check the type first so that has_prefix does not raise an error and crash.
if type(env_uri) ~= "string" then
return nil, "error env_uri type: " .. type(env_uri)
end
if not string.has_prefix(upper(env_uri), ENV_PREFIX) then
return nil, "error env_uri prefix: " .. env_uri
end
local path = sub(env_uri, #ENV_PREFIX + 1)
local idx = find(path, "/")
if not idx then
return {key = path, sub_key = ""}
end
local key = sub(path, 1, idx - 1)
local sub_key = sub(path, idx + 1)
return {
key = key,
sub_key = sub_key
}
end
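-- e.g. (illustrative values): parse_env_uri("$ENV://DB_CONF/host") returns
-- {key = "DB_CONF", sub_key = "host"}; if DB_CONF is '{"host": "127.0.0.1"}',
-- fetch_by_uri below json-decodes it and returns "127.0.0.1".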
function _M.fetch_by_uri(env_uri)
log.info("fetching data from env uri: ", env_uri)
local opts, err = parse_env_uri(env_uri)
if not opts then
return nil, err
end
local main_value = apisix_env_vars[opts.key] or os.getenv(opts.key)
if main_value and opts.sub_key ~= "" then
local vt, err = json.decode(main_value)
if not vt then
return nil, "decode failed, err: " .. (err or "") .. ", value: " .. main_value
end
return vt[opts.sub_key]
end
return main_value
end
return _M

Some files were not shown because too many files have changed in this diff.