feat(apisix): add Cloudron package

- Implements Apache APISIX packaging for the Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
This commit is contained in:
2025-09-04 09:42:47 -05:00
parent f7bae09f22
commit 54cc5f7308
1608 changed files with 388342 additions and 0 deletions

View File

@@ -0,0 +1,40 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Bootstrap for the APISIX CLI: extend the Lua module search paths so the
-- bundled dependencies under the APISIX home are found first, then hand
-- control over to the CLI entry point.
local orig_cpath = package.cpath
local orig_path = package.path

-- make sure the saved package.path ends with ";" so that later
-- concatenations always yield a well-formed search path
if orig_path:sub(-1) ~= ";" then
    orig_path = orig_path .. ";"
end

local apisix_home = "/usr/local/apisix"
local deps_cpath = table.concat({
    apisix_home, "/deps/lib64/lua/5.1/?.so;",
    apisix_home, "/deps/lib/lua/5.1/?.so;",
})
local deps_path = apisix_home .. "/deps/share/lua/5.1/?.lua;"
local home_path = apisix_home .. "/?.lua;"

-- bundled dependencies take priority over the system paths; the APISIX
-- source tree itself is searched last
package.cpath = deps_cpath .. orig_cpath
package.path = deps_path .. orig_path .. home_path

-- build the runtime environment and execute the requested sub-command
local env = require("apisix.cli.env")(apisix_home, orig_cpath, orig_path)
local ops = require("apisix.cli.ops")
ops.execute(env, arg)

View File

@@ -0,0 +1,385 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Default (builtin) configuration for APISIX.
-- The user's config.yaml is merged on top of this table by
-- apisix.cli.file; values here are the fallbacks used when the user does
-- not override them.
-- (fixed misspelled local `table_conact` -> `table_concat`)
local table_concat = table.concat

local _M = {
    apisix = {
        node_listen = { 9080 },
        enable_admin = true,
        enable_dev_mode = false,
        enable_reuseport = true,
        show_upstream_status_in_response_header = false,
        enable_ipv6 = true,
        enable_http2 = true,
        enable_server_tokens = true,
        extra_lua_path = "",
        extra_lua_cpath = "",
        proxy_cache = {
            cache_ttl = "10s",
            zones = {
                {
                    name = "disk_cache_one",
                    memory_size = "50m",
                    disk_size = "1G",
                    disk_path = "/tmp/disk_cache_one",
                    cache_levels = "1:2"
                },
                {
                    name = "memory_cache",
                    memory_size = "50m"
                }
            }
        },
        delete_uri_tail_slash = false,
        normalize_uri_like_servlet = false,
        router = {
            http = "radixtree_host_uri",
            ssl = "radixtree_sni"
        },
        proxy_mode = "http",
        resolver_timeout = 5,
        enable_resolv_search_opt = true,
        ssl = {
            enable = true,
            listen = { {
                port = 9443,
                enable_http3 = false
            } },
            ssl_protocols = "TLSv1.2 TLSv1.3",
            -- joined with ":" to form the cipher string handed to OpenSSL
            ssl_ciphers = table_concat({
                "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256",
                "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384",
                "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305",
                "DHE-RSA-AES128-GCM-SHA256", "DHE-RSA-AES256-GCM-SHA384",
            }, ":"),
            ssl_session_tickets = false,
            ssl_trusted_certificate = "system"
        },
        enable_control = true,
        disable_sync_configuration_during_start = false,
        data_encryption = {
            enable_encrypt_fields = true,
            keyring = { "qeddd145sfvddff3", "edd1c9f0985e76a2" }
        },
        events = {
            module = "lua-resty-events"
        }
    },
    nginx_config = {
        error_log = "logs/error.log",
        error_log_level = "warn",
        worker_processes = "auto",
        enable_cpu_affinity = false,
        worker_rlimit_nofile = 20480,
        worker_shutdown_timeout = "240s",
        max_pending_timers = 16384,
        max_running_timers = 4096,
        event = {
            worker_connections = 10620
        },
        meta = {
            lua_shared_dict = {
                ["prometheus-metrics"] = "15m",
                ["standalone-config"] = "10m",
                ["status-report"] = "1m",
            }
        },
        stream = {
            enable_access_log = false,
            access_log = "logs/access_stream.log",
            -- luacheck: push max code line length 300
            access_log_format = "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time",
            -- luacheck: pop
            access_log_format_escape = "default",
            lua_shared_dict = {
                ["etcd-cluster-health-check-stream"] = "10m",
                ["lrucache-lock-stream"] = "10m",
                ["plugin-limit-conn-stream"] = "10m",
                ["worker-events-stream"] = "10m",
                ["tars-stream"] = "1m",
                ["upstream-healthcheck-stream"] = "10m",
            }
        },
        main_configuration_snippet = "",
        http_configuration_snippet = "",
        http_server_configuration_snippet = "",
        http_server_location_configuration_snippet = "",
        http_admin_configuration_snippet = "",
        http_end_configuration_snippet = "",
        stream_configuration_snippet = "",
        http = {
            enable_access_log = true,
            access_log = "logs/access.log",
            access_log_buffer = 16384,
            -- luacheck: push max code line length 300
            access_log_format =
                '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time "$upstream_scheme://$upstream_host$upstream_uri"',
            -- luacheck: pop
            access_log_format_escape = "default",
            keepalive_timeout = "60s",
            client_header_timeout = "60s",
            client_body_timeout = "60s",
            client_max_body_size = 0,
            send_timeout = "10s",
            underscores_in_headers = "on",
            real_ip_header = "X-Real-IP",
            real_ip_recursive = "off",
            real_ip_from = { "127.0.0.1", "unix:" },
            proxy_ssl_server_name = true,
            upstream = {
                keepalive = 320,
                keepalive_requests = 1000,
                keepalive_timeout = "60s"
            },
            charset = "utf-8",
            variables_hash_max_size = 2048,
            lua_shared_dict = {
                ["internal-status"] = "10m",
                ["plugin-limit-req"] = "10m",
                ["plugin-limit-count"] = "10m",
                ["prometheus-metrics"] = "10m",
                ["plugin-limit-conn"] = "10m",
                ["upstream-healthcheck"] = "10m",
                ["worker-events"] = "10m",
                ["lrucache-lock"] = "10m",
                ["balancer-ewma"] = "10m",
                ["balancer-ewma-locks"] = "10m",
                ["balancer-ewma-last-touched-at"] = "10m",
                ["plugin-limit-req-redis-cluster-slot-lock"] = "1m",
                ["plugin-limit-count-redis-cluster-slot-lock"] = "1m",
                ["plugin-limit-conn-redis-cluster-slot-lock"] = "1m",
                ["plugin-ai-rate-limiting"] = "10m",
                ["plugin-ai-rate-limiting-reset-header"] = "10m",
                tracing_buffer = "10m",
                ["plugin-api-breaker"] = "10m",
                ["etcd-cluster-health-check"] = "10m",
                discovery = "1m",
                jwks = "1m",
                introspection = "10m",
                ["access-tokens"] = "1m",
                ["ext-plugin"] = "1m",
                tars = "1m",
                ["cas-auth"] = "10m",
                ["ocsp-stapling"] = "10m",
                ["mcp-session"] = "10m",
            }
        }
    },
    graphql = {
        max_size = 1048576
    },
    plugins = {
        "real-ip",
        "ai",
        "client-control",
        "proxy-control",
        "request-id",
        "zipkin",
        "ext-plugin-pre-req",
        "fault-injection",
        "mocking",
        "serverless-pre-function",
        "cors",
        "ip-restriction",
        "ua-restriction",
        "referer-restriction",
        "csrf",
        "uri-blocker",
        "request-validation",
        "chaitin-waf",
        "multi-auth",
        "openid-connect",
        "cas-auth",
        "authz-casbin",
        "authz-casdoor",
        "wolf-rbac",
        "ldap-auth",
        "hmac-auth",
        "basic-auth",
        "jwt-auth",
        "jwe-decrypt",
        "key-auth",
        "consumer-restriction",
        "attach-consumer-label",
        "forward-auth",
        "opa",
        "authz-keycloak",
        "proxy-cache",
        "body-transformer",
        "ai-prompt-template",
        "ai-prompt-decorator",
        "ai-prompt-guard",
        "ai-rag",
        "ai-rate-limiting",
        "ai-proxy-multi",
        "ai-proxy",
        "ai-aws-content-moderation",
        "proxy-mirror",
        "proxy-rewrite",
        "workflow",
        "api-breaker",
        "limit-conn",
        "limit-count",
        "limit-req",
        "gzip",
        -- deprecated and will be removed in a future release
        -- "server-info",
        "traffic-split",
        "redirect",
        "response-rewrite",
        "mcp-bridge",
        "degraphql",
        "kafka-proxy",
        "grpc-transcode",
        "grpc-web",
        "http-dubbo",
        "public-api",
        "prometheus",
        "datadog",
        "lago",
        "loki-logger",
        "elasticsearch-logger",
        "echo",
        "loggly",
        "http-logger",
        "splunk-hec-logging",
        "skywalking-logger",
        "google-cloud-logging",
        "sls-logger",
        "tcp-logger",
        "kafka-logger",
        "rocketmq-logger",
        "syslog",
        "udp-logger",
        "file-logger",
        "clickhouse-logger",
        "tencent-cloud-cls",
        "inspect",
        "example-plugin",
        "aws-lambda",
        "azure-functions",
        "openwhisk",
        "openfunction",
        "serverless-post-function",
        "ext-plugin-post-req",
        "ext-plugin-post-resp",
        "ai-request-rewrite",
    },
    stream_plugins = { "ip-restriction", "limit-conn", "mqtt-proxy", "syslog" },
    plugin_attr = {
        ["log-rotate"] = {
            timeout = 10000,
            interval = 3600,
            max_kept = 168,
            max_size = -1,
            enable_compression = false
        },
        skywalking = {
            service_name = "APISIX",
            service_instance_name = "APISIX Instance Name",
            endpoint_addr = "http://127.0.0.1:12800",
            report_interval = 3
        },
        opentelemetry = {
            trace_id_source = "x-request-id",
            resource = {
                ["service.name"] = "APISIX"
            },
            collector = {
                address = "127.0.0.1:4318",
                request_timeout = 3,
                request_headers = {
                    Authorization = "token"
                }
            },
            batch_span_processor = {
                drop_on_queue_full = false,
                max_queue_size = 1024,
                batch_timeout = 2,
                inactive_timeout = 1,
                max_export_batch_size = tonumber(os.getenv("OTEL_BSP_MAX_EXPORT_BATCH_SIZE")) or 16
            },
            set_ngx_var = false
        },
        prometheus = {
            export_uri = "/apisix/prometheus/metrics",
            metric_prefix = "apisix_",
            enable_export_server = true,
            export_addr = {
                ip = "127.0.0.1",
                port = 9091
            }
        },
        ["server-info"] = {
            report_ttl = 60
        },
        ["dubbo-proxy"] = {
            upstream_multiplex_count = 32
        },
        ["proxy-mirror"] = {
            timeout = {
                connect = "60s",
                read = "60s",
                send = "60s"
            }
        },
        inspect = {
            delay = 3,
            hooks_file = "/usr/local/apisix/plugin_inspect_hooks.lua"
        },
        zipkin = {
            set_ngx_var = false
        }
    },
    deployment = {
        role = "traditional",
        role_traditional = {
            config_provider = "etcd"
        },
        admin = {
            admin_key_required = true,
            admin_key = {
                {
                    name = "admin",
                    key = "",
                    role = "admin"
                }
            },
            enable_admin_cors = true,
            enable_admin_ui = true,
            allow_admin = { "127.0.0.0/24" },
            admin_listen = {
                ip = "0.0.0.0",
                port = 9180
            },
            admin_api_version = "v3"
        },
        etcd = {
            host = { "http://127.0.0.1:2379" },
            prefix = "/apisix",
            timeout = 30,
            watch_timeout = 50,
            startup_retry = 2,
            tls = {
                verify = true
            }
        }
    }
}

return _M

View File

@@ -0,0 +1,115 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Localize globals used below so the returned closure does not depend on
-- later mutations of the global environment.
local require = require
local util = require("apisix.cli.util")
local pcall = pcall
local error = error
local exit = os.exit
local stderr = io.stderr
local str_find = string.find
local arg = arg
local package = package
local tonumber = tonumber

-- Build the CLI environment table. Called with the APISIX home directory
-- and the original package.(c)path values saved by the launcher script.
-- Raises (via error) when a required external command fails.
return function (apisix_home, pkg_cpath_org, pkg_path_org)
    -- ulimit setting should be checked when APISIX starts
    local res, err = util.execute_cmd("ulimit -n")
    if not res then
        error("failed to exec ulimit cmd \'ulimit -n \', err: " .. err)
    end
    local trimed_res = util.trim(res)
    -- "unlimited" is kept as a string; any other output must parse as a number
    local ulimit = trimed_res == "unlimited" and trimed_res or tonumber(trimed_res)
    if not ulimit then
        error("failed to fetch current maximum number of open file descriptors")
    end

    -- only for developer, use current folder as working space
    local is_root_path = false
    local script_path = arg[0]
    if script_path:sub(1, 2) == './' then
        -- NOTE(review): util.execute_cmd may return nil on failure, and
        -- util.trim(nil) would then raise before the check below — confirm
        -- util.trim tolerates nil, or move the check before trimming.
        apisix_home = util.trim(util.execute_cmd("pwd"))
        if not apisix_home then
            error("failed to fetch current path")
        end

        -- determine whether the current path is under the "/root" folder.
        -- "/root/" is the root folder flag.
        if str_find(apisix_home .. "/", '/root/', nil, true) == 1 then
            is_root_path = true
        end

        -- prepend the developer workspace's bundled deps to the search paths
        local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
                          .. apisix_home .. "/deps/lib/lua/5.1/?.so;"

        local pkg_path = apisix_home .. "/?/init.lua;"
                         .. apisix_home .. "/deps/share/lua/5.1/?/init.lua;"
                         .. apisix_home .. "/deps/share/lua/5.1/?.lua;;"

        package.cpath = pkg_cpath .. package.cpath
        package.path = pkg_path .. package.path
    end

    do
        -- skip luajit environment
        local ok = pcall(require, "table.new")
        if not ok then
            -- a stand-alone lua-cjson would shadow the cjson bundled with
            -- OpenResty, so refuse to continue when it is installed
            local ok, json = pcall(require, "cjson")
            if ok and json then
                stderr:write("please remove the cjson library in Lua, it may "
                             .. "conflict with the cjson library in openresty. "
                             .. "\n luarocks remove lua-cjson\n")
                exit(1)
            end
        end
    end

    -- pre-transform openresty path
    res, err = util.execute_cmd("command -v openresty")
    if not res then
        error("failed to exec cmd \'command -v openresty\', err: " .. err)
    end
    local openresty_path_abs = util.trim(res)

    -- command line used later by the CLI to drive the nginx master process
    local openresty_args = openresty_path_abs .. [[ -p ]] .. apisix_home .. [[ -c ]]
                           .. apisix_home .. [[/conf/nginx.conf]]

    local or_info, err = util.execute_cmd("openresty -V 2>&1")
    if not or_info then
        error("failed to exec cmd \'openresty -V 2>&1\', err: " .. err)
    end

    -- apisix-base builds ship apisix-nginx-module; plain OpenResty does not
    local use_apisix_base = true
    if not or_info:find("apisix-nginx-module", 1, true) then
        use_apisix_base = false
    end

    local min_etcd_version = "3.4.0"

    return {
        apisix_home = apisix_home,
        is_root_path = is_root_path,
        openresty_args = openresty_args,
        openresty_info = or_info,
        use_apisix_base = use_apisix_base,
        pkg_cpath_org = pkg_cpath_org,
        pkg_path_org = pkg_path_org,
        min_etcd_version = min_etcd_version,
        ulimit = ulimit,
    }
end

View File

@@ -0,0 +1,405 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local base64_encode = require("base64").encode
local dkjson = require("dkjson")
local constants = require("apisix.constants")
local util = require("apisix.cli.util")
local file = require("apisix.cli.file")
local http = require("socket.http")
local https = require("ssl.https")
local ltn12 = require("ltn12")
local type = type
local ipairs = ipairs
local pairs = pairs
local print = print
local tonumber = tonumber
local str_format = string.format
local str_sub = string.sub
local table_concat = table.concat
local table_insert = table.insert
local io_stderr = io.stderr
local _M = {}

-- Timeout for all I/O operations (LuaSocket http.TIMEOUT, in seconds)
http.TIMEOUT = 3
-- Parse "X.Y.Z" or "X.Y.Z-suffix" into { major, minor, patch }.
-- Returns nil plus an error message for anything else.
local function parse_semantic_version(ver)
    local errmsg = "invalid semantic version: " .. ver

    -- strip an optional pre-release suffix such as "-rc1"
    local dash_parts = util.split(ver, "-")
    if #dash_parts > 2 then
        return nil, errmsg
    end
    if #dash_parts == 2 then
        ver = dash_parts[1]
    end

    local nums = util.split(ver, ".")
    if #nums ~= 3 then
        return nil, errmsg
    end

    local major = tonumber(nums[1])
    local minor = tonumber(nums[2])
    local patch = tonumber(nums[3])
    if not (major and minor and patch) then
        return nil, errmsg
    end

    return { major = major, minor = minor, patch = patch }
end
-- Return true when v1 < v2 under semantic-version ordering, false
-- otherwise; nil plus an error message when either version is malformed.
local function compare_semantic_version(v1, v2)
    local a, err = parse_semantic_version(v1)
    if not a then
        return nil, err
    end

    local b
    b, err = parse_semantic_version(v2)
    if not b then
        return nil, err
    end

    if a.major ~= b.major then
        return a.major < b.major
    end
    if a.minor ~= b.minor then
        return a.minor < b.minor
    end
    return a.patch < b.patch
end
-- Perform an HTTP(S) request against an etcd endpoint.
-- `url` is either a plain URL string (turned into a GET whose body is
-- collected into a local sink table) or a ready-made LuaSocket request
-- table. TLS client cert/verify settings are taken from
-- yaml_conf.etcd.tls and yaml_conf.apisix.ssl.
-- For the string form, returns the response body and the status code;
-- for the table form, returns LuaSocket's raw results unchanged.
local function request(url, yaml_conf)
    local response_body = {}
    local single_request = false
    if type(url) == "string" then
        url = {
            url = url,
            method = "GET",
            sink = ltn12.sink.table(response_body),
        }
        single_request = true
    end

    local res, code

    if str_sub(url.url, 1, 8) == "https://" then
        local verify = "peer"
        if yaml_conf.etcd.tls then
            local cfg = yaml_conf.etcd.tls

            if cfg.verify == false then
                verify = "none"
            end

            -- client certificate for mutual TLS, when configured
            url.certificate = cfg.cert
            url.key = cfg.key

            local apisix_ssl = yaml_conf.apisix.ssl
            if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
                url.cafile = apisix_ssl.ssl_trusted_certificate
            end
        end

        url.verify = verify
        res, code = https.request(url)
    else
        res, code = http.request(url)
    end

    -- In case of failure, request returns nil followed by an error message.
    -- Else the first return value is the response body
    -- and followed by the response status code.
    if single_request and res ~= nil then
        return table_concat(response_body), code
    end

    return res, code
end
-- Create the etcd directory keys APISIX expects via etcd's v3
-- gRPC-gateway JSON API (/v3/kv/put). When etcd auth is enabled
-- (etcd.user/password set), a token is fetched first from
-- /v3/auth/authenticate and sent in the Authorization header.
-- `index`/`host_count` locate `host` in the endpoint list so the last
-- endpoint turns a soft failure into a hard error (util.die).
-- Returns true when every directory was initialized on this host.
local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
    local is_success = true

    local errmsg
    local auth_token
    local user = yaml_conf.etcd.user
    local password = yaml_conf.etcd.password
    if user and password then
        local auth_url = host .. "/v3/auth/authenticate"
        local json_auth = {
            name = user,
            password = password
        }

        local post_json_auth = dkjson.encode(json_auth)
        local response_body = {}

        local res, err
        local retry_time = 0
        while retry_time < 2 do
            res, err = request({
                url = auth_url,
                method = "POST",
                source = ltn12.source.string(post_json_auth),
                sink = ltn12.sink.table(response_body),
                headers = {
                    ["Content-Length"] = #post_json_auth
                }
            }, yaml_conf)
            -- In case of failure, request returns nil followed by an error message.
            -- Else the first return value is just the number 1
            -- and followed by the response status code.
            if res then
                break
            end
            retry_time = retry_time + 1
            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
                             auth_url, err, retry_time))
        end

        if not res then
            errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", auth_url, err)
            util.die(errmsg)
        end

        local res_auth = table_concat(response_body)
        local body_auth, _, err_auth = dkjson.decode(res_auth)
        if err_auth or (body_auth and not body_auth["token"]) then
            errmsg = str_format("got malformed auth message: \"%s\" from etcd \"%s\"\n",
                                res_auth, auth_url)
            util.die(errmsg)
        end

        auth_token = body_auth.token
    end

    -- union of the HTTP and stream resource directories
    local dirs = {}
    for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
        dirs[name] = true
    end
    for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
        dirs[name] = true
    end

    for dir_name in pairs(dirs) do
        local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
        local put_url = host .. "/v3/kv/put"
        -- the etcd v3 JSON gateway requires base64-encoded key and value
        local post_json = '{"value":"' .. base64_encode("init_dir")
                          .. '", "key":"' .. base64_encode(key) .. '"}'
        local response_body = {}
        local headers = {["Content-Length"] = #post_json}
        if auth_token then
            headers["Authorization"] = auth_token
        end

        local res, err
        local retry_time = 0
        while retry_time < 2 do
            res, err = request({
                url = put_url,
                method = "POST",
                source = ltn12.source.string(post_json),
                sink = ltn12.sink.table(response_body),
                headers = headers
            }, yaml_conf)
            retry_time = retry_time + 1
            if res then
                break
            end
            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
                             put_url, err, retry_time))
        end
        if not res then
            errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", put_url, err)
            util.die(errmsg)
        end

        local res_put = table_concat(response_body)
        if res_put:find("404 page not found", 1, true) then
            -- bug fix: the second half of the message used to be passed as
            -- the %s argument, garbling the error; substitute the host.
            errmsg = str_format("gRPC gateway is not enabled in etcd cluster \"%s\", "
                                .. "which is required by Apache APISIX\n", host)
            util.die(errmsg)
        end

        if res_put:find("CommonName of client sending a request against gateway", 1, true) then
            errmsg = str_format("etcd \"client-cert-auth\" cannot be used with gRPC-gateway, "
                                .. "please configure the etcd username and password "
                                .. "in configuration file\n")
            util.die(errmsg)
        end

        if res_put:find("error", 1, true) then
            is_success = false
            if (index == host_count) then
                errmsg = str_format("got malformed key-put message: \"%s\" from etcd \"%s\"\n",
                                    res_put, put_url)
                util.die(errmsg)
            end

            break
        end

        if args and args["verbose"] then
            print(res_put)
        end
    end

    return is_success
end
-- Dispatch wrapper: directory initialization currently only has an
-- HTTP (gRPC-gateway) implementation.
local function prepare_dirs(yaml_conf, args, index, host, host_count)
    return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
-- Initialize etcd for APISIX: verify every configured endpoint runs a
-- supported etcd version, require a majority of healthy nodes, and (for
-- writing roles) create the expected directory keys.
-- Returns true early when etcd is not the config provider or when running
-- as a read-only data plane; dies (util.die) on fatal misconfiguration.
function _M.init(env, args)
    -- read_yaml_conf
    local yaml_conf, err = file.read_yaml_conf(env.apisix_home)
    if not yaml_conf then
        util.die("failed to read local yaml config of apisix: ", err)
    end

    if not yaml_conf.apisix then
        util.die("failed to read `apisix` field from yaml file when init etcd")
    end

    -- NOTE(review): assumes yaml_conf.deployment is always present
    -- (read_yaml_conf derives it from the defaults) — confirm before reuse.
    if yaml_conf.deployment.config_provider ~= "etcd" then
        return true
    end

    if not yaml_conf.etcd then
        util.die("failed to read `etcd` field from yaml file when init etcd")
    end

    -- convert old single etcd config to multiple etcd config
    if type(yaml_conf.etcd.host) == "string" then
        yaml_conf.etcd.host = {yaml_conf.etcd.host}
    end

    local host_count = #(yaml_conf.etcd.host)

    -- sanity-check endpoint URLs and warn when http/https are mixed
    local scheme
    for i = 1, host_count do
        local host = yaml_conf.etcd.host[i]
        local fields = util.split(host, "://")
        if not fields then
            util.die("malformed etcd endpoint: ", host, "\n")
        end

        if not scheme then
            scheme = fields[1]
        elseif scheme ~= fields[1] then
            print([[WARNING: mixed protocols among etcd endpoints]])
        end
    end

    -- check the etcd cluster version
    local etcd_healthy_hosts = {}
    for index, host in ipairs(yaml_conf.etcd.host) do
        local version_url = host .. "/version"
        local errmsg

        local res, err
        local retry_time = 0

        local etcd = yaml_conf.etcd
        local max_retry = tonumber(etcd.startup_retry) or 2
        while retry_time < max_retry do
            res, err = request(version_url, yaml_conf)
            -- In case of failure, request returns nil followed by an error message.
            -- Else the first return value is the response body
            -- and followed by the response status code.
            if res then
                break
            end
            retry_time = retry_time + 1
            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
                             version_url, err, retry_time))
        end

        if res then
            local body, _, err = dkjson.decode(res)
            if err or (body and not body["etcdcluster"]) then
                errmsg = str_format("got malformed version message: \"%s\" from etcd \"%s\"\n", res,
                                    version_url)
                util.die(errmsg)
            end

            local cluster_version = body["etcdcluster"]
            -- NOTE(review): compare_semantic_version returns nil (plus an
            -- error) on an unparsable version, which falls through here the
            -- same as "new enough" — confirm this is intended.
            if compare_semantic_version(cluster_version, env.min_etcd_version) then
                util.die("etcd cluster version ", cluster_version,
                         " is less than the required version ", env.min_etcd_version,
                         ", please upgrade your etcd cluster\n")
            end

            table_insert(etcd_healthy_hosts, host)
        else
            io_stderr:write(str_format("request etcd endpoint \'%s\' error, %s\n", version_url,
                                       err))
        end
    end

    if #etcd_healthy_hosts <= 0 then
        util.die("all etcd nodes are unavailable\n")
    end

    -- require strictly more than half of the endpoints to be reachable
    if (#etcd_healthy_hosts / host_count * 100) <= 50 then
        util.die("the etcd cluster needs at least 50% and above healthy nodes\n")
    end

    -- access from the data plane to etcd should be read-only.
    -- data plane writes to etcd may cause security issues.
    if yaml_conf.deployment.role == "data_plane" then
        print("access from the data plane to etcd should be read-only, "
              .."skip initializing the data of etcd")
        return true
    end

    print("trying to initialize the data of etcd")
    local etcd_ok = false
    -- one successfully initialized endpoint suffices
    for index, host in ipairs(etcd_healthy_hosts) do
        if prepare_dirs(yaml_conf, args, index, host, host_count) then
            etcd_ok = true
            break
        end
    end

    if not etcd_ok then
        util.die("none of the configured etcd works well\n")
    end
end

return _M

View File

@@ -0,0 +1,343 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local yaml = require("lyaml")
local profile = require("apisix.core.profile")
local util = require("apisix.cli.util")
local schema = require("apisix.cli.schema")
local default_conf = require("apisix.cli.config")
local dkjson = require("dkjson")
local pl_path = require("pl.path")
local pairs = pairs
local type = type
local tonumber = tonumber
local getenv = os.getenv
local str_gmatch = string.gmatch
local str_find = string.find
local str_sub = string.sub
local print = print
local _M = {}

-- environment variables substituted into the config by var_sub(),
-- recorded so they can later be re-exported (e.g. into nginx.conf)
local exported_vars

-- Return the table of environment variables used during configuration
-- resolution, or nil when no substitution has happened yet.
function _M.get_exported_vars()
    return exported_vars
end
-- A YAML line counts as "empty" when it is blank, whitespace-only, or a
-- (possibly indented) comment line.
local function is_empty_yaml_line(line)
    if line == '' then
        return true
    end
    return str_find(line, '^%s*$') or str_find(line, '^%s*#')
end
-- Return true when every key of `t` belongs to its sequence part, i.e.
-- the table is a plain array with keys 1..#t (an empty table counts).
local function tab_is_array(t)
    local n = 0
    for _ in pairs(t) do
        n = n + 1
    end
    return n == #t
end
-- Substitute ${{VAR}} / ${{VAR:=default}} references in `val` with the
-- matching environment variable (or the default). Every variable that is
-- actually used is recorded in the module-level `exported_vars` table.
-- Returns the substituted string, a flag telling whether any variable was
-- used, and an error message when a referenced variable is missing and
-- has no default.
local function var_sub(val)
    local err
    local var_used = false
    -- we use '${{var}}' because '$var' and '${var}' are taken
    -- by Nginx
    local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
        local i, j = var:find("%:%=")
        local default
        if i and j then
            -- everything after ":=" is the default value (trimmed)
            default = var:sub(i + 2, #var)
            default = default:gsub('^%s*(.-)%s*$', '%1')
            var = var:sub(1, i - 1)
        end

        local v = getenv(var) or default
        if v then
            if not exported_vars then
                exported_vars = {}
            end

            exported_vars[var] = v
            var_used = true
            return v
        end

        -- missing variable: remember the error but keep substituting,
        -- replacing the reference with an empty string
        err = "failed to handle configuration: " ..
              "can't find environment variable " .. var
        return ""
    end)
    return new_val, var_used, err
end
-- Recursively substitute ${{VAR}} references in both the keys and the
-- values of `conf` (in place). String values that resolve to numbers or
-- booleans are converted to the corresponding Lua type.
-- Returns true on success, or nil and an error message.
local function resolve_conf_var(conf)
    local new_keys = {}
    for key, val in pairs(conf) do
        -- avoid re-iterating the table for already iterated key
        if new_keys[key] then
            goto continue
        end
        -- substitute environment variables from conf keys
        if type(key) == "string" then
            local new_key, _, err = var_sub(key)
            if err then
                return nil, err
            end
            if new_key ~= key then
                new_keys[new_key] = "dummy" -- we only care about checking the key
                -- bug fix: remove the entry stored under the unresolved
                -- key (`conf.key = nil` used to clear the literal "key"
                -- field and leave the old entry behind)
                conf[key] = nil
                -- NOTE(review): inserting conf[new_key] while iterating with
                -- pairs is undefined behavior per the Lua manual; consider
                -- collecting the renames and applying them after the loop.
                conf[new_key] = val
                key = new_key
            end
        end
        if type(val) == "table" then
            local ok, err = resolve_conf_var(val)
            if not ok then
                return nil, err
            end

        elseif type(val) == "string" then
            local new_val, var_used, err = var_sub(val)

            if err then
                return nil, err
            end

            if var_used then
                -- coerce substituted scalars to their natural Lua type
                if tonumber(new_val) ~= nil then
                    new_val = tonumber(new_val)
                elseif new_val == "true" then
                    new_val = true
                elseif new_val == "false" then
                    new_val = false
                end
            end

            conf[key] = new_val
        end
        ::continue::
    end

    return true
end
_M.resolve_conf_var = resolve_conf_var
-- Apply reserved environment variables that override specific config
-- entries. Currently only APISIX_DEPLOYMENT_ETCD_HOST is recognized: a
-- JSON value that replaces deployment.etcd.host.
-- TODO: support more reserved environment variables
local function replace_by_reserved_env_vars(conf)
    local raw = getenv("APISIX_DEPLOYMENT_ETCD_HOST")
    if not raw then
        return
    end

    local deployment = conf["deployment"]
    if not (deployment and deployment["etcd"]) then
        return
    end

    local hosts, _, err = dkjson.decode(raw)
    if err or not hosts then
        -- keep the configured default on a malformed value, but warn
        print("parse ${APISIX_DEPLOYMENT_ETCD_HOST} failed, error:", err)
        return
    end

    deployment["etcd"]["host"] = hosts
end
-- Whether the config entry at `path` (an "a->b->c" style path) may
-- legally hold more than one scalar type, so a user override with a
-- different type than the default is accepted by merge_conf.
local function path_is_multi_type(path, type_val)
    local is_scalar = type_val == "number" or type_val == "string"
    if is_scalar and str_sub(path, 1, 14) == "nginx_config->" then
        return true
    end

    if path == "apisix->node_listen" and type_val == "number" then
        return true
    end

    if path == "apisix->data_encryption->keyring" then
        return true
    end

    return false
end
-- Recursively merge the user table `new_tab` into `base` (in place).
-- Rules: an explicit yaml.null removes the entry; arrays replace the
-- default wholesale; nested hash tables merge key by key; scalar type
-- mismatches are an error unless path_is_multi_type allows them.
-- `ppath` is the "a->b->c" path of `base`, used only for error messages.
-- Returns the merged base table, or nil and an error message.
local function merge_conf(base, new_tab, ppath)
    ppath = ppath or ""

    for key, val in pairs(new_tab) do
        if type(val) == "table" then
            if val == yaml.null then
                -- explicit `null` in config.yaml deletes the default entry
                base[key] = nil

            elseif tab_is_array(val) then
                -- arrays are replaced, not merged element-wise
                base[key] = val

            else
                if base[key] == nil then
                    base[key] = {}
                end

                local ok, err = merge_conf(
                    base[key],
                    val,
                    ppath == "" and key or ppath .. "->" .. key
                )
                if not ok then
                    return nil, err
                end
            end
        else
            local type_val = type(val)

            if base[key] == nil then
                base[key] = val
            elseif type(base[key]) ~= type_val then
                local path = ppath == "" and key or ppath .. "->" .. key

                if path_is_multi_type(path, type_val) then
                    base[key] = val
                else
                    return nil, "failed to merge, path[" .. path .. "] expect: " ..
                                type(base[key]) .. ", but got: " .. type_val
                end
            else
                base[key] = val
            end
        end
    end

    return base
end
-- Load config.yaml, substitute environment variables, merge it over the
-- builtin defaults, validate against the schema and post-process the
-- deployment- and SSL-related fields.
-- NOTE: mutates and returns the shared `default_conf` table.
-- Returns the effective configuration table, or nil and an error message.
function _M.read_yaml_conf(apisix_home)
    if apisix_home then
        profile.apisix_home = apisix_home .. "/"
    end

    -- a user-customized config path takes precedence over the default one
    local local_conf_path = profile:customized_yaml_path()
    if not local_conf_path then
        local_conf_path = profile:yaml_path("config")
    end
    local user_conf_yaml, err = util.read_file(local_conf_path)
    if not user_conf_yaml then
        return nil, err
    end

    -- skip parsing entirely when the file only has blanks and comments
    local is_empty_file = true
    for line in str_gmatch(user_conf_yaml .. '\n', '(.-)\r?\n') do
        if not is_empty_yaml_line(line) then
            is_empty_file = false
            break
        end
    end

    if not is_empty_file then
        local user_conf = yaml.load(user_conf_yaml)
        if not user_conf then
            return nil, "invalid config.yaml file"
        end

        local ok, err = resolve_conf_var(user_conf)
        if not ok then
            return nil, err
        end

        ok, err = merge_conf(default_conf, user_conf)
        if not ok then
            return nil, err
        end
    end

    -- fill the default value by the schema
    local ok, err = schema.validate(default_conf)
    if not ok then
        return nil, err
    end

    -- derive the flattened etcd/admin settings from the deployment role
    if default_conf.deployment then
        default_conf.deployment.config_provider = "etcd"
        if default_conf.deployment.role == "traditional" then
            default_conf.etcd = default_conf.deployment.etcd
            if default_conf.deployment.role_traditional.config_provider == "yaml" then
                default_conf.deployment.config_provider = "yaml"
            end

        elseif default_conf.deployment.role == "control_plane" then
            default_conf.etcd = default_conf.deployment.etcd
            default_conf.apisix.enable_admin = true

        elseif default_conf.deployment.role == "data_plane" then
            default_conf.etcd = default_conf.deployment.etcd
            if default_conf.deployment.role_data_plane.config_provider == "yaml" then
                default_conf.deployment.config_provider = "yaml"
            elseif default_conf.deployment.role_data_plane.config_provider == "json" then
                default_conf.deployment.config_provider = "json"
            elseif default_conf.deployment.role_data_plane.config_provider == "xds" then
                default_conf.deployment.config_provider = "xds"
            end
            default_conf.apisix.enable_admin = false
        end
    end

    --- using `not ngx` to check whether the current execution environment is apisix cli module,
    --- because it is only necessary to parse and validate `apisix.yaml` in apisix cli.
    if default_conf.deployment.config_provider == "yaml" and not ngx then
        local apisix_conf_path = profile:yaml_path("apisix")
        local apisix_conf_yaml, _ = util.read_file(apisix_conf_path)
        if apisix_conf_yaml then
            local apisix_conf = yaml.load(apisix_conf_yaml)
            if apisix_conf then
                -- NOTE(review): the substituted apisix_conf is checked for
                -- env-var errors only and then discarded — confirm that is
                -- intentional.
                local ok, err = resolve_conf_var(apisix_conf)
                if not ok then
                    return nil, err
                end
            end
        end
    end

    local apisix_ssl = default_conf.apisix.ssl
    if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
        -- default value is set to "system" during schema validation
        if apisix_ssl.ssl_trusted_certificate == "system" then
            local trusted_certs_path, err = util.get_system_trusted_certs_filepath()
            if not trusted_certs_path then
                util.die(err)
            end
            apisix_ssl.ssl_trusted_certificate = trusted_certs_path
        else
            -- During validation, the path is relative to PWD
            -- When Nginx starts, the path is relative to conf
            -- Therefore we need to check the absolute version instead
            local cert_path = pl_path.abspath(apisix_ssl.ssl_trusted_certificate)
            if not pl_path.exists(cert_path) then
                util.die("certificate path", cert_path, "doesn't exist\n")
            end

            apisix_ssl.ssl_trusted_certificate = cert_path
        end
    end

    replace_by_reserved_env_vars(default_conf)

    return default_conf
end

return _M

View File

@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- IP match and verify module.
--
-- @module cli.ip
local mediador_ip = require("resty.mediador.ip")
local setmetatable = setmetatable
local _M = {}
local mt = { __index = _M }
---
-- create a instance of module cli.ip
--
-- @function cli.ip:new
-- @tparam string ip IP or CIDR.
-- @treturn instance of module if the given ip valid, nil and error message otherwise.
--- Construct a cli.ip instance wrapping a parsed IP or CIDR.
-- Validation is delegated to resty.mediador.ip; on failure nothing is
-- allocated and the caller receives nil plus an error message.
function _M.new(self, ip)
    if mediador_ip.valid(ip) then
        return setmetatable({ _ip = mediador_ip.parse(ip) }, mt)
    end
    return nil, "invalid ip"
end
---
-- Is that the given ip loopback?
--
-- @function cli.ip:is_loopback
-- @treturn boolean True if the given ip is the loopback, false otherwise.
--- Report whether the wrapped address lies in the loopback range.
-- Mirrors the original short-circuit: when no parsed address is present
-- the falsy `_ip` value itself is returned, otherwise a boolean.
function _M.is_loopback(self)
    local parsed = self._ip
    if not parsed then
        return parsed
    end
    return parsed:range() == "loopback"
end
---
-- Is that the given ip unspecified?
--
-- @function cli.ip:is_unspecified
-- @treturn boolean True if the given ip is all the unspecified, false otherwise.
--- Report whether the wrapped address is the unspecified address
-- (0.0.0.0 / ::).  Falsy `_ip` is propagated unchanged, matching the
-- original `and` short-circuit; otherwise a boolean is returned.
function _M.is_unspecified(self)
    local parsed = self._ip
    if not parsed then
        return parsed
    end
    return parsed:range() == "unspecified"
end
return _M

View File

@@ -0,0 +1,998 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Template for the generated nginx.conf served by the APISIX CLI.
-- The long-bracket string below is rendered with lua-resty-template
-- style directives: {% ... %} for control flow, {* ... *} for value
-- interpolation.  The string is runtime data — its bytes ARE the
-- emitted configuration — so it must only change when the rendered
-- nginx.conf should change.
--
-- Fix: the guard for the plugin-ai-rate-limiting-reset-header shared
-- dict previously tested http.lua_shared_dict["plugin-ai-rate-limiting"]
-- (a copy-paste of the preceding block) while interpolating the
-- "...-reset-header" size.  With only the former key configured this
-- rendered `lua_shared_dict plugin-ai-rate-limiting-reset-header ;`,
-- an empty size that nginx rejects at startup.  The guard now tests
-- the key that is actually interpolated.
return [=[
# Configuration File - Nginx Server Configs
# This is a read-only file, do not try to modify it.
{% if user and user ~= '' then %}
user {* user *};
{% end %}
master_process on;
worker_processes {* worker_processes *};
{% if os_name == "Linux" and enable_cpu_affinity == true then %}
worker_cpu_affinity auto;
{% end %}
# main configuration snippet starts
{% if main_configuration_snippet then %}
{* main_configuration_snippet *}
{% end %}
# main configuration snippet ends
error_log {* error_log *} {* error_log_level or "warn" *};
pid logs/nginx.pid;
worker_rlimit_nofile {* worker_rlimit_nofile *};
events {
accept_mutex off;
worker_connections {* event.worker_connections *};
}
worker_rlimit_core {* worker_rlimit_core *};
worker_shutdown_timeout {* worker_shutdown_timeout *};
env APISIX_PROFILE;
env PATH; # for searching external plugin runner's binary
# reserved environment variables for configuration
env APISIX_DEPLOYMENT_ETCD_HOST;
{% if envs then %}
{% for _, name in ipairs(envs) do %}
env {*name*};
{% end %}
{% end %}
{% if use_apisix_base then %}
thread_pool grpc-client-nginx-module threads=1;
lua {
{% if enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* meta.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if standalone_with_admin_api then %}
lua_shared_dict standalone-config {* meta.lua_shared_dict["standalone-config"] *};
{% end %}
{% if status then %}
lua_shared_dict status-report {* meta.lua_shared_dict["status-report"] *};
{% end %}
lua_shared_dict nacos 10m;
}
{% if enabled_stream_plugins["prometheus"] and not enable_http then %}
http {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if enabled_stream_plugins["prometheus"] then %}
init_by_lua_block {
require "resty.core"
local process = require("ngx.process")
local ok, err = process.enable_privileged_agent()
if not ok then
ngx.log(ngx.ERR, "failed to enable privileged_agent: ", err)
end
}
init_worker_by_lua_block {
require("apisix.plugins.prometheus.exporter").http_init(true)
}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics(true)
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
}
{% end %}
{% end %}
{% if enable_stream then %}
stream {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
lua_socket_log_errors off;
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict lrucache-lock-stream {* stream.lua_shared_dict["lrucache-lock-stream"] *};
lua_shared_dict etcd-cluster-health-check-stream {* stream.lua_shared_dict["etcd-cluster-health-check-stream"] *};
lua_shared_dict worker-events-stream {* stream.lua_shared_dict["worker-events-stream"] *};
{% if stream.lua_shared_dict["upstream-healthcheck-stream"] then %}
lua_shared_dict upstream-healthcheck-stream {* stream.lua_shared_dict["upstream-healthcheck-stream"] *};
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars-stream {* stream.lua_shared_dict["tars-stream"] *};
{% end %}
{% if enabled_stream_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *};
{% end %}
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*}-stream {*size*};
{% end %}
{% end %}
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# for stream logs, off by default
{% if stream.enable_access_log == true then %}
log_format main escape={* stream.access_log_format_escape *} '{* stream.access_log_format *}';
access_log {* stream.access_log *} main buffer=16384 flush=3;
{% end %}
# stream configuration snippet starts
{% if stream_configuration_snippet then %}
{* stream_configuration_snippet *}
{% end %}
# stream configuration snippet ends
upstream apisix_backend {
server 127.0.0.1:80;
balancer_by_lua_block {
apisix.stream_balancer_phase()
}
}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.stream_init(args)
}
init_worker_by_lua_block {
apisix.stream_init_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/stream_worker_events.sock;
access_log off;
content_by_lua_block {
require("resty.events.compat").run()
}
}
{% end %}
server {
{% for _, item in ipairs(stream_proxy.tcp or {}) do %}
listen {*item.addr*} {% if item.tls then %} ssl {% end %} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %};
{% end %}
{% for _, addr in ipairs(stream_proxy.udp or {}) do %}
listen {*addr*} udp {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if tcp_enable_ssl then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if proxy_protocol and proxy_protocol.enable_tcp_pp_to_upstream then %}
proxy_protocol on;
{% end %}
preread_by_lua_block {
apisix.stream_preread_phase()
}
proxy_pass apisix_backend;
{% if use_apisix_base then %}
set $upstream_sni "apisix_backend";
proxy_ssl_server_name on;
proxy_ssl_name $upstream_sni;
{% end %}
log_by_lua_block {
apisix.stream_log_phase()
}
}
}
{% end %}
{% if enable_http then %}
http {
# put extra_lua_path in front of the builtin path
# so user can override the source code
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict internal-status {* http.lua_shared_dict["internal-status"] *};
lua_shared_dict upstream-healthcheck {* http.lua_shared_dict["upstream-healthcheck"] *};
lua_shared_dict worker-events {* http.lua_shared_dict["worker-events"] *};
lua_shared_dict lrucache-lock {* http.lua_shared_dict["lrucache-lock"] *};
lua_shared_dict balancer-ewma {* http.lua_shared_dict["balancer-ewma"] *};
lua_shared_dict balancer-ewma-locks {* http.lua_shared_dict["balancer-ewma-locks"] *};
lua_shared_dict balancer-ewma-last-touched-at {* http.lua_shared_dict["balancer-ewma-last-touched-at"] *};
lua_shared_dict etcd-cluster-health-check {* http.lua_shared_dict["etcd-cluster-health-check"] *}; # etcd health check
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*} {*size*};
{% end %}
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars {* http.lua_shared_dict["tars"] *};
{% end %}
{% if http.lua_shared_dict["plugin-ai-rate-limiting"] then %}
lua_shared_dict plugin-ai-rate-limiting {* http.lua_shared_dict["plugin-ai-rate-limiting"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting 10m;
{% end %}
{% if http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] then %}
lua_shared_dict plugin-ai-rate-limiting-reset-header {* http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting-reset-header 10m;
{% end %}
{% if enabled_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn {* http.lua_shared_dict["plugin-limit-conn"] *};
lua_shared_dict plugin-limit-conn-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-conn-redis-cluster-slot-lock"] *};
{% end %}
{% if enabled_plugins["limit-req"] then %}
lua_shared_dict plugin-limit-req-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-req-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-req {* http.lua_shared_dict["plugin-limit-req"] *};
{% end %}
{% if enabled_plugins["limit-count"] then %}
lua_shared_dict plugin-limit-count {* http.lua_shared_dict["plugin-limit-count"] *};
lua_shared_dict plugin-limit-count-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-count-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-count-reset-header {* http.lua_shared_dict["plugin-limit-count"] *};
{% end %}
{% if enabled_plugins["prometheus"] and not enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* http.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if enabled_plugins["skywalking"] then %}
lua_shared_dict tracing_buffer {* http.lua_shared_dict.tracing_buffer *}; # plugin: skywalking
{% end %}
{% if enabled_plugins["api-breaker"] then %}
lua_shared_dict plugin-api-breaker {* http.lua_shared_dict["plugin-api-breaker"] *};
{% end %}
{% if enabled_plugins["openid-connect"] or enabled_plugins["authz-keycloak"] then %}
# for openid-connect and authz-keycloak plugin
lua_shared_dict discovery {* http.lua_shared_dict["discovery"] *}; # cache for discovery metadata documents
{% end %}
{% if enabled_plugins["openid-connect"] then %}
# for openid-connect plugin
lua_shared_dict jwks {* http.lua_shared_dict["jwks"] *}; # cache for JWKs
lua_shared_dict introspection {* http.lua_shared_dict["introspection"] *}; # cache for JWT verification results
{% end %}
{% if enabled_plugins["cas-auth"] then %}
lua_shared_dict cas_sessions {* http.lua_shared_dict["cas-auth"] *};
{% end %}
{% if enabled_plugins["authz-keycloak"] then %}
# for authz-keycloak
lua_shared_dict access-tokens {* http.lua_shared_dict["access-tokens"] *}; # cache for service account access tokens
{% end %}
{% if enabled_plugins["ocsp-stapling"] then %}
lua_shared_dict ocsp-stapling {* http.lua_shared_dict["ocsp-stapling"] *}; # cache for ocsp-stapling
{% end %}
{% if enabled_plugins["ext-plugin-pre-req"] or enabled_plugins["ext-plugin-post-req"] then %}
lua_shared_dict ext-plugin {* http.lua_shared_dict["ext-plugin"] *}; # cache for ext-plugin
{% end %}
{% if enabled_plugins["mcp-bridge"] then %}
lua_shared_dict mcp-session {* http.lua_shared_dict["mcp-session"] *}; # cache for mcp-session
{% end %}
{% if config_center == "xds" then %}
lua_shared_dict xds-config 10m;
lua_shared_dict xds-config-version 1m;
{% end %}
# for custom shared dict
{% if http.custom_lua_shared_dict then %}
{% for cache_key, cache_size in pairs(http.custom_lua_shared_dict) do %}
lua_shared_dict {*cache_key*} {*cache_size*};
{% end %}
{% end %}
{% if enabled_plugins["error-log-logger"] then %}
lua_capture_error_log 10m;
{% end %}
lua_ssl_verify_depth 5;
ssl_session_timeout 86400;
{% if http.underscores_in_headers then %}
underscores_in_headers {* http.underscores_in_headers *};
{%end%}
lua_socket_log_errors off;
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
lua_http10_buffering off;
lua_regex_match_limit 100000;
lua_regex_cache_max_entries 8192;
{% if http.enable_access_log == false then %}
access_log off;
{% else %}
log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}';
uninitialized_variable_warn off;
{% if http.access_log_buffer then %}
access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3;
{% else %}
access_log {* http.access_log *} main buffer=16384 flush=3;
{% end %}
{% end %}
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
client_header_timeout {* http.client_header_timeout *};
client_body_timeout {* http.client_body_timeout *};
send_timeout {* http.send_timeout *};
variables_hash_max_size {* http.variables_hash_max_size *};
server_tokens off;
include mime.types;
charset {* http.charset *};
{% if http.real_ip_header then %}
real_ip_header {* http.real_ip_header *};
{% end %}
{% if http.real_ip_recursive then %}
real_ip_recursive {* http.real_ip_recursive *};
{% end %}
{% if http.real_ip_from then %}
{% for _, real_ip in ipairs(http.real_ip_from) do %}
set_real_ip_from {*real_ip*};
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# http configuration snippet starts
{% if http_configuration_snippet then %}
{* http_configuration_snippet *}
{% end %}
# http configuration snippet ends
upstream apisix_backend {
server 0.0.0.1;
{% if use_apisix_base then %}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
# we put the static configuration above so that we can override it in the Lua code
balancer_by_lua_block {
apisix.http_balancer_phase()
}
{% else %}
balancer_by_lua_block {
apisix.http_balancer_phase()
}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
{% end %}
}
{% if enabled_plugins["dubbo-proxy"] then %}
upstream apisix_dubbo_backend {
server 0.0.0.1;
balancer_by_lua_block {
apisix.http_balancer_phase()
}
# dynamical keepalive doesn't work with dubbo as the connection here
# is managed by ngx_multi_upstream_module
multi {* dubbo_upstream_multiplex_count *};
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
}
{% end %}
{% if use_apisix_base then %}
apisix_delay_client_max_body_check on;
apisix_mirror_on_demand on;
{% end %}
{% if wasm then %}
wasm_vm wasmtime;
{% end %}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.http_init(args)
-- set apisix_lua_home into constants module
-- it may be used by plugins to determine the work path of apisix
local constants = require("apisix.constants")
constants.apisix_lua_home = "{*apisix_lua_home*}"
}
init_worker_by_lua_block {
apisix.http_init_worker()
}
exit_worker_by_lua_block {
apisix.http_exit_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/worker_events.sock;
access_log off;
location / {
content_by_lua_block {
require("resty.events.compat").run()
}
}
}
{% end %}
{% if enable_control then %}
server {
listen {* control_server_addr *};
access_log off;
location / {
content_by_lua_block {
apisix.http_control()
}
}
}
{% end %}
{% if status then %}
server {
listen {* status_server_addr *} enable_process=privileged_agent;
access_log off;
location /status {
content_by_lua_block {
apisix.status()
}
}
location /status/ready {
content_by_lua_block {
apisix.status_ready()
}
}
}
{% end %}
{% if enabled_plugins["prometheus"] and prometheus_server_addr then %}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics()
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
{% if enable_admin then %}
server {
{%if https_admin then%}
listen {* admin_server_addr *} ssl;
ssl_certificate {* admin_api_mtls.admin_ssl_cert *};
ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *};
{%if admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= "" then%}
ssl_verify_client on;
ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *};
{% end %}
ssl_session_cache shared:SSL:20m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% else %}
listen {* admin_server_addr *};
{%end%}
log_not_found off;
# admin configuration snippet starts
{% if http_admin_configuration_snippet then %}
{* http_admin_configuration_snippet *}
{% end %}
# admin configuration snippet ends
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%else%}
allow all;
{%end%}
location /apisix/admin {
content_by_lua_block {
apisix.http_admin()
}
}
{% if enable_admin_ui then %}
location = /ui {
return 301 /ui/;
}
location ^~ /ui/ {
rewrite ^/ui/(.*)$ /$1 break;
root {* apisix_lua_home *}/ui;
try_files $uri /index.html =404;
gzip on;
gzip_types text/css application/javascript application/json;
expires 7200s;
add_header Cache-Control "private,max-age=7200";
}
{% end %}
}
{% end %}
{% if deployment_role ~= "control_plane" then %}
{% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off;
{% else %}
lua_shared_dict {* cache.name *} {* cache.memory_size *};
{% end %}
{% end %}
map $upstream_cache_zone $upstream_cache_zone_info {
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
{* cache.name *} {* cache.disk_path *},{* cache.cache_levels *};
{% end %}
{% end %}
}
{% end %}
server {
{% if enable_http2 then %}
http2 on;
{% end %}
{% if enable_http3_in_server_context then %}
http3 on;
{% end %}
{% for _, item in ipairs(node_listen) do %}
listen {* item.ip *}:{* item.port *} default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if ssl.enable then %}
{% for _, item in ipairs(ssl.listen) do %}
{% if item.enable_http3 then %}
listen {* item.ip *}:{* item.port *} quic default_server {% if enable_reuseport then %} reuseport {% end %};
listen {* item.ip *}:{* item.port *} ssl default_server;
{% else %}
listen {* item.ip *}:{* item.port *} ssl default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% end %}
{% end %}
{% if proxy_protocol and proxy_protocol.listen_http_port then %}
listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol;
{% end %}
{% if proxy_protocol and proxy_protocol.listen_https_port then %}
listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol;
{% end %}
server_name _;
{% if ssl.enable then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 10m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# opentelemetry_set_ngx_var starts
{% if opentelemetry_set_ngx_var then %}
set $opentelemetry_context_traceparent '';
set $opentelemetry_trace_id '';
set $opentelemetry_span_id '';
{% end %}
# opentelemetry_set_ngx_var ends
# zipkin_set_ngx_var starts
{% if zipkin_set_ngx_var then %}
set $zipkin_context_traceparent '';
set $zipkin_trace_id '';
set $zipkin_span_id '';
{% end %}
# zipkin_set_ngx_var ends
# http server configuration snippet starts
{% if http_server_configuration_snippet then %}
{* http_server_configuration_snippet *}
{% end %}
# http server configuration snippet ends
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
access_log off;
stub_status;
}
{% if ssl.enable then %}
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if http.proxy_ssl_server_name then %}
proxy_ssl_name $upstream_host;
proxy_ssl_server_name on;
{% end %}
location / {
set $upstream_mirror_host '';
set $upstream_mirror_uri '';
set $upstream_upgrade '';
set $upstream_connection '';
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
set $ctx_ref '';
{% if wasm then %}
set $wasm_process_req_body '';
set $wasm_process_resp_body '';
{% end %}
# http server location configuration snippet starts
{% if http_server_location_configuration_snippet then %}
{* http_server_location_configuration_snippet *}
{% end %}
# http server location configuration snippet ends
{% if enabled_plugins["dubbo-proxy"] then %}
set $dubbo_service_name '';
set $dubbo_service_version '';
set $dubbo_method '';
{% end %}
access_by_lua_block {
apisix.http_access_phase()
}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_set_header Upgrade $upstream_upgrade;
proxy_set_header Connection $upstream_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_header Date;
### the following x-forwarded-* headers is to send to upstream server
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
{% if enabled_plugins["proxy-cache"] then %}
### the following configuration is to cache response content from upstream server
set $upstream_cache_zone off;
set $upstream_cache_key '';
set $upstream_cache_bypass '';
set $upstream_no_cache '';
proxy_cache $upstream_cache_zone;
proxy_cache_valid any {% if proxy_cache.cache_ttl then %} {* proxy_cache.cache_ttl *} {% else %} 10s {% end %};
proxy_cache_min_uses 1;
proxy_cache_methods GET HEAD POST;
proxy_cache_lock_timeout 5s;
proxy_cache_use_stale off;
proxy_cache_key $upstream_cache_key;
proxy_no_cache $upstream_no_cache;
proxy_cache_bypass $upstream_cache_bypass;
{% end %}
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
location @grpc_pass {
access_by_lua_block {
apisix.grpc_access_phase()
}
{% if use_apisix_base then %}
# For servers which obey the standard, when `:authority` is missing,
# `host` will be used instead. When used with apisix-runtime, we can do
# better by setting `:authority` directly
grpc_set_header ":authority" $upstream_host;
{% else %}
grpc_set_header "Host" $upstream_host;
{% end %}
grpc_set_header Content-Type application/grpc;
grpc_set_header TE trailers;
grpc_socket_keepalive on;
grpc_pass $upstream_scheme://apisix_backend;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror_grpc;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% if enabled_plugins["dubbo-proxy"] then %}
location @dubbo_pass {
access_by_lua_block {
apisix.dubbo_access_phase()
}
dubbo_pass_all_headers on;
dubbo_pass_body on;
dubbo_pass $dubbo_service_name $dubbo_service_version $dubbo_method apisix_dubbo_backend;
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
proxy_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
proxy_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
proxy_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_pass $upstream_mirror_uri;
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror_grpc {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
grpc_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
grpc_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
grpc_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
grpc_pass $upstream_mirror_host;
}
{% end %}
}
{% end %}
# http end configuration snippet starts
{% if http_end_configuration_snippet then %}
{* http_end_configuration_snippet *}
{% end %}
# http end configuration snippet ends
}
{% end %}
]=]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,450 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local jsonschema = require("jsonschema")
local pairs = pairs
local pcall = pcall
local require = require
local _M = {}

-- JSON-schema fragment validating the `etcd` section of the user
-- configuration.  Only `prefix` and `host` are mandatory; everything
-- else (credentials, TLS material, timeouts) is optional.
local etcd_schema = {
    type = "object",
    required = {"prefix", "host"},
    properties = {
        prefix = {
            type = "string",
        },
        host = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
                -- each endpoint must carry an explicit http/https scheme
                pattern = [[^https?://]],
            },
        },
        timeout = {
            type = "integer",
            default = 30,
            minimum = 1,
            description = "etcd connection timeout in seconds",
        },
        resync_delay = {
            type = "integer",
        },
        user = {
            type = "string",
        },
        password = {
            type = "string",
        },
        tls = {
            type = "object",
            properties = {
                cert = {
                    type = "string",
                },
                key = {
                    type = "string",
                },
            },
        },
    },
}
-- JSON schema for the whole user configuration (config.yaml).
-- Top-level sections validated here: apisix, nginx_config, http, etcd,
-- plugins, stream_plugins, wasm and deployment; only "apisix" and
-- "deployment" are mandatory.
local config_schema = {
    type = "object",
    properties = {
        apisix = {
            properties = {
                lua_module_hook = {
                    pattern = "^[a-zA-Z._-]+$",
                },
                proxy_protocol = {
                    type = "object",
                    properties = {
                        listen_http_port = {
                            type = "integer",
                        },
                        listen_https_port = {
                            type = "integer",
                        },
                        enable_tcp_pp = {
                            type = "boolean",
                        },
                        enable_tcp_pp_to_upstream = {
                            type = "boolean",
                        },
                    }
                },
                proxy_cache = {
                    type = "object",
                    properties = {
                        zones = {
                            type = "array",
                            minItems = 1,
                            items = {
                                type = "object",
                                properties = {
                                    name = {
                                        type = "string",
                                    },
                                    memory_size = {
                                        type = "string",
                                    },
                                    disk_size = {
                                        type = "string",
                                    },
                                    disk_path = {
                                        type = "string",
                                    },
                                    cache_levels = {
                                        type = "string",
                                    },
                                },
                                -- a cache zone is either memory-only (exactly
                                -- name + memory_size) or fully disk-backed
                                -- (all five keys present)
                                oneOf = {
                                    {
                                        required = {"name", "memory_size"},
                                        maxProperties = 2,
                                    },
                                    {
                                        required = {"name", "memory_size", "disk_size",
                                                    "disk_path", "cache_levels"},
                                    }
                                },
                            },
                            uniqueItems = true,
                        }
                    }
                },
                proxy_mode = {
                    type = "string",
                    enum = {"http", "stream", "http&stream"},
                },
                -- L4 proxy listeners; each tcp entry may be a bare port, an
                -- "addr:port" string, or an object with an optional tls flag
                stream_proxy = {
                    type = "object",
                    properties = {
                        tcp = {
                            type = "array",
                            minItems = 1,
                            items = {
                                anyOf = {
                                    {
                                        type = "integer",
                                    },
                                    {
                                        type = "string",
                                    },
                                    {
                                        type = "object",
                                        properties = {
                                            addr = {
                                                anyOf = {
                                                    {
                                                        type = "integer",
                                                    },
                                                    {
                                                        type = "string",
                                                    },
                                                }
                                            },
                                            tls = {
                                                type = "boolean",
                                            }
                                        },
                                        required = {"addr"}
                                    },
                                },
                            },
                            uniqueItems = true,
                        },
                        udp = {
                            type = "array",
                            minItems = 1,
                            items = {
                                anyOf = {
                                    {
                                        type = "integer",
                                    },
                                    {
                                        type = "string",
                                    },
                                },
                            },
                            uniqueItems = true,
                        },
                    }
                },
                dns_resolver = {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "string",
                    }
                },
                dns_resolver_valid = {
                    type = "integer",
                },
                enable_http2 = {
                    type = "boolean",
                    default = true
                },
                ssl = {
                    type = "object",
                    properties = {
                        -- "system" means: discover the distro CA bundle at
                        -- startup (see util.get_system_trusted_certs_filepath)
                        ssl_trusted_certificate = {
                            type = "string",
                            default = "system"
                        },
                        listen = {
                            type = "array",
                            items = {
                                type = "object",
                                properties = {
                                    ip = {
                                        type = "string",
                                    },
                                    port = {
                                        type = "integer",
                                        minimum = 1,
                                        maximum = 65535
                                    },
                                    enable_http3 = {
                                        type = "boolean",
                                    },
                                }
                            }
                        },
                    }
                },
                data_encryption = {
                    type = "object",
                    properties = {
                        -- keyring entries must be exactly 16 chars (AES-128
                        -- key length); either a single key or a list of keys
                        keyring = {
                            anyOf = {
                                {
                                    type = "array",
                                    minItems = 1,
                                    items = {
                                        type = "string",
                                        minLength = 16,
                                        maxLength = 16
                                    }
                                },
                                {
                                    type = "string",
                                    minLength = 16,
                                    maxLength = 16
                                }
                            }
                        },
                    }
                },
            }
        },
        nginx_config = {
            type = "object",
            properties = {
                envs = {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "string",
                    }
                }
            },
        },
        http = {
            type = "object",
            properties = {
                custom_lua_shared_dict = {
                    type = "object",
                }
            }
        },
        etcd = etcd_schema,
        plugins = {
            type = "array",
            default = {},
            minItems = 0,
            items = {
                type = "string"
            }
        },
        stream_plugins = {
            type = "array",
            default = {},
            minItems = 0,
            items = {
                type = "string"
            }
        },
        wasm = {
            type = "object",
            properties = {
                plugins = {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "object",
                        properties = {
                            name = {
                                type = "string"
                            },
                            file = {
                                type = "string"
                            },
                            priority = {
                                type = "integer"
                            },
                            http_request_phase = {
                                enum = {"access", "rewrite"},
                                default = "access",
                            },
                        },
                        required = {"name", "file", "priority"}
                    }
                }
            }
        },
        deployment = {
            type = "object",
            properties = {
                role = {
                    enum = {"traditional", "control_plane", "data_plane", "standalone"},
                    default = "traditional"
                }
            },
        },
    },
    required = {"apisix", "deployment"},
}
-- JSON schema for the Admin API section of the deployment config.
local admin_schema = {
    type = "object",
    properties = {
        admin_key = {
            type = "array",
            -- BUGFIX: "items" was previously nested inside "properties",
            -- which is meaningless for an array schema, so admin_key entries
            -- were never actually validated. Hoist "items" to the array level
            -- so each credential object is checked.
            items = {
                type = "object",
                properties = {
                    name = {type = "string"},
                    key = {type = "string"},
                    role = {type = "string"},
                }
            }
        },
        admin_listen = {
            properties = {
                listen = { type = "string" },
                port = { type = "integer" },
            },
            -- Admin API listens on all interfaces, port 9180 by default
            default = {
                listen = "0.0.0.0",
                port = 9180,
            }
        },
        https_admin = {
            type = "boolean",
        },
        admin_key_required = {
            type = "boolean",
        },
    }
}
-- Per-role validation schemas for the `deployment` section, keyed by
-- deployment.role. NOTE(review): the role enum in config_schema also allows
-- "standalone", which intentionally has no entry here (nothing extra to
-- validate for standalone mode).
local deployment_schema = {
    -- single node serving both Admin API and traffic; config from etcd or yaml
    traditional = {
        properties = {
            etcd = etcd_schema,
            admin = admin_schema,
            role_traditional = {
                properties = {
                    config_provider = {
                        enum = {"etcd", "yaml"}
                    },
                },
                required = {"config_provider"}
            }
        },
        required = {"etcd"}
    },
    -- control plane node: exposes the Admin API, must use etcd
    control_plane = {
        properties = {
            etcd = etcd_schema,
            admin = admin_schema,
            role_control_plane = {
                properties = {
                    config_provider = {
                        enum = {"etcd"}
                    },
                },
                required = {"config_provider"}
            },
        },
        required = {"etcd", "role_control_plane"}
    },
    -- data plane node: serves traffic only; config may also come from
    -- local files (yaml/json) or an xDS server, so etcd is not required
    data_plane = {
        properties = {
            etcd = etcd_schema,
            role_data_plane = {
                properties = {
                    config_provider = {
                        enum = {"etcd", "yaml", "json", "xds"}
                    },
                },
                required = {"config_provider"}
            },
        },
        required = {"role_data_plane"}
    }
}
-- Validate the parsed config.yaml table.
-- Checks, in order: the global config_schema, any discovery sections that
-- ship their own schema module, and the role-specific deployment schema.
-- Returns true on success, or false plus a descriptive error message.
function _M.validate(yaml_conf)
    local validator = jsonschema.generate_validator(config_schema)
    local ok, err = validator(yaml_conf)
    if not ok then
        return false, "failed to validate config: " .. err
    end

    if yaml_conf.discovery then
        for kind, conf in pairs(yaml_conf.discovery) do
            -- a discovery module without a schema file is simply not validated
            local ok, schema = pcall(require, "apisix.discovery." .. kind .. ".schema")
            if ok then
                local validator = jsonschema.generate_validator(schema)
                local ok, err = validator(conf)
                if not ok then
                    return false, "invalid discovery " .. kind .. " configuration: " .. err
                end
            end
        end
    end

    local role = yaml_conf.deployment.role
    local role_schema = deployment_schema[role]
    -- BUGFIX: the role enum allows "standalone", which has no entry in
    -- deployment_schema; previously generate_validator(nil) would raise
    -- here. Roles without a dedicated schema have nothing more to check.
    if not role_schema then
        return true
    end

    local validator = jsonschema.generate_validator(role_schema)
    local ok, err = validator(yaml_conf.deployment)
    if not ok then
        return false, "invalid deployment " .. role .. " configuration: " .. err
    end

    return true
end

return _M

-- NOTE(review): removed git-web UI extraction residue ("View File" and a
-- diff hunk header) that is not valid Lua. A second module — the APISIX
-- CLI shell/file utility helpers — follows below.
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local pcall = pcall
local open = io.open
local popen = io.popen
local close = io.close
local exit = os.exit
local stderr = io.stderr
local str_format = string.format
local tonumber = tonumber
local io = io
local ipairs = ipairs
local assert = assert
local _M = {}

-- Run a shell command and capture its stdout.
-- The captured output keeps the trailing newline emitted by the command,
-- so callers typically post-process the result with `trim`.
-- Returns the output string, or nil plus an error message.
local function execute_cmd(cmd)
    local handle, popen_err = io.popen(cmd)
    if not handle then
        return nil, "failed to execute command: "
                    .. cmd .. ", error info: " .. popen_err
    end

    local output, read_err = handle:read("*all")
    handle:close()
    if output == nil then
        return nil, "failed to read execution result of: "
                    .. cmd .. ", error info: " .. read_err
    end

    return output
end
_M.execute_cmd = execute_cmd
-- Variant of execute_cmd for commands whose stdout is normally empty:
-- stderr is redirected into stdout so the error text can be captured.
function _M.execute_cmd_with_error(cmd)
    local redirected = cmd .. " 2>&1"
    return execute_cmd(redirected)
end
-- Strip leading and trailing whitespace from `s` and return the result.
function _M.trim(s)
    local stripped = s:match("^%s*(.-)%s*$")
    return stripped
end
-- Split the string `self` on separator `sep` (defaults to ":").
-- Returns an array of the non-empty fields; consecutive separators
-- are collapsed (empty fields are dropped).
function _M.split(self, sep)
    local delim = sep or ":"
    local fields = {}
    for piece in self:gmatch("([^" .. delim .. "]+)") do
        fields[#fields + 1] = piece
    end
    return fields
end
-- Read the entire contents of `file_path` in binary mode.
-- Returns the file data, or false plus an error message on failure.
function _M.read_file(file_path)
    local fh, open_err = io.open(file_path, "rb")
    if not fh then
        return false, "failed to open file: " .. file_path .. ", error info:" .. open_err
    end

    local content, read_err = fh:read("*all")
    fh:close()
    if not content then
        return false, "failed to read file: " .. file_path .. ", error info:" .. read_err
    end

    return content
end
-- Write the given message fragments to stderr and terminate the
-- process with exit status 1. Never returns.
function _M.die(...)
    io.stderr:write(...)
    os.exit(1)
end
-- Detect whether the host is a 32-bit architecture.
-- Prefers LuaJIT's ffi.abi; otherwise shells out to `getconf LONG_BIT`.
-- Returns a boolean, or nil (falsy, i.e. treated as 64-bit) when the
-- fallback command fails or its output cannot be parsed.
function _M.is_32bit_arch()
    local ok, ffi = pcall(require, "ffi")
    if ok then
        -- LuaJIT: ask the FFI for the ABI word size directly
        return ffi.abi("32bit")
    end

    local ret = _M.execute_cmd("getconf LONG_BIT")
    -- BUGFIX: execute_cmd may return nil (popen failure) and tonumber may
    -- return nil on garbage output; previously `bits <= 32` would then raise
    -- "attempt to compare nil with number".
    local bits = ret and tonumber(ret)
    if not bits then
        return nil
    end
    return bits <= 32
end
-- Write `data` to `file_path`, truncating any existing content.
-- Returns true on success, or false plus an error message on failure.
function _M.write_file(file_path, data)
    local fh, open_err = io.open(file_path, "w+")
    if not fh then
        return false, "failed to open file: "
                      .. file_path
                      .. ", error info:"
                      .. open_err
    end

    local written, write_err = fh:write(data)
    fh:close()
    if not written then
        return false, "failed to write file: "
                      .. file_path
                      .. ", error info:"
                      .. write_err
    end

    return true
end
-- Return true when `file_path` can be opened for reading, false otherwise.
function _M.file_exists(file_path)
    local fh = io.open(file_path, "r")
    if fh == nil then
        return false
    end
    return fh:close()
end
do
    -- Well-known system CA bundle locations across common distributions.
    local trusted_certs_paths = {
        "/etc/ssl/certs/ca-certificates.crt",                -- Debian/Ubuntu/Gentoo
        "/etc/pki/tls/certs/ca-bundle.crt",                  -- Fedora/RHEL 6
        "/etc/ssl/ca-bundle.pem",                            -- OpenSUSE
        "/etc/pki/tls/cacert.pem",                           -- OpenELEC
        "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", -- CentOS/RHEL 7
        "/etc/ssl/cert.pem",                                 -- OpenBSD, Alpine
    }

    -- Local readability probe, kept private so the search does not depend
    -- on other module members.
    local function file_exists(path)
        local fh = io.open(path, "r")
        if not fh then
            return false
        end
        fh:close()
        return true
    end

    -- Return the first CA bundle path that exists on this system, or
    -- nil plus a hint telling the user how to configure one explicitly.
    function _M.get_system_trusted_certs_filepath()
        for _, candidate in ipairs(trusted_certs_paths) do
            if file_exists(candidate) then
                return candidate
            end
        end

        return nil,
               "Could not find trusted certs file in " ..
               "any of the `system`-predefined locations. " ..
               "Please install a certs file there or set " ..
               "`lua_ssl_trusted_certificate` to a " ..
               "specific file path instead of `system`"
    end
end
-- Concatenate the PEM files listed in `paths` into `combined_filepath`,
-- appending a newline after each file so adjacent certs stay separated.
-- Raises (via assert) if any file cannot be opened.
function _M.gen_trusted_certs_combined_file(combined_filepath, paths)
    local out = assert(io.open(combined_filepath, "w"))
    for _, cert_path in ipairs(paths) do
        local in_file = assert(io.open(cert_path, "r"))
        local pem = in_file:read("*a")
        in_file:close()
        out:write(pem)
        out:write("\n")
    end
    out:close()
end

return _M