feat(apisix): add Cloudron package

- Implements Apache APISIX packaging for the Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI
Co-Authored-By: Gemini <noreply@google.com>
2025-09-04 09:42:47 -05:00
parent f7bae09f22
commit 54cc5f7308
1608 changed files with 388342 additions and 0 deletions


@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local consumers = require("apisix.consumer").consumers
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function delete_checker(id)
local consumers, consumers_ver = consumers()
if consumers_ver and consumers then
for _, consumer in ipairs(consumers) do
if type(consumer) == "table" and consumer.value
and consumer.value.group_id
and tostring(consumer.value.group_id) == id then
return 400, {error_msg = "can not delete this consumer group,"
.. " consumer [" .. consumer.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "consumer_groups",
kind = "consumer group",
schema = core.schema.consumer_group,
checker = check_conf,
unsupported_methods = {"post"},
delete_checker = delete_checker
})


@@ -0,0 +1,65 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugins = require("apisix.admin.plugins")
local resource = require("apisix.admin.resource")
local function check_conf(username, conf, need_username, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if username and username ~= conf.username then
return nil, {error_msg = "wrong username" }
end
if conf.plugins then
ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER)
if not ok then
return nil, {error_msg = "invalid plugins configuration: " .. err}
end
end
if conf.group_id then
local key = "/consumer_groups/" .. conf.group_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch consumer group info by "
.. "consumer group id [" .. conf.group_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch consumer group info by "
.. "consumer group id [" .. conf.group_id .. "], "
.. "response code: " .. res.status}
end
end
return conf.username
end
return resource.new({
name = "consumers",
kind = "consumer",
schema = core.schema.consumer,
checker = check_conf,
unsupported_methods = {"post", "patch"}
})


@@ -0,0 +1,74 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugins = require("apisix.admin.plugins")
local plugin = require("apisix.plugin")
local resource = require("apisix.admin.resource")
local pairs = pairs
local function check_conf(_id, conf, _need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if conf.plugins then
ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER)
if not ok then
return nil, {error_msg = "invalid plugins configuration: " .. err}
end
for name, _ in pairs(conf.plugins) do
local plugin_obj = plugin.get(name)
if not plugin_obj then
return nil, {error_msg = "unknown plugin " .. name}
end
if plugin_obj.type ~= "auth" then
return nil, {error_msg = "only supports auth type plugins in consumer credential"}
end
end
end
return true, nil
end
-- get_credential_etcd_key splices the credential's etcd key (without prefix)
-- from credential_id and sub_path.
-- The credential_id parameter comes from the uri or payload; sub_path has the form
-- {consumer_name}/credentials or {consumer_name}/credentials/{credential_id}.
-- Only when listing credentials via GET is credential_id nil; sub_path is then
-- {consumer_name}/credentials, so the return value is /consumers/{consumer_name}/credentials.
-- For the other methods credential_id is not nil and the return value is
-- /consumers/{consumer_name}/credentials/{credential_id}.
local function get_credential_etcd_key(credential_id, _conf, sub_path, _args)
if credential_id then
local uri_segs = core.utils.split_uri(sub_path)
local consumer_name = uri_segs[1]
return "/consumers/" .. consumer_name .. "/credentials/" .. credential_id
end
return "/consumers/" .. sub_path
end
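-- Two illustrative calls (consumer name "jack" and credential id "cred-a" are
-- hypothetical values):
--   get_credential_etcd_key(nil, nil, "jack/credentials")
--     --> "/consumers/jack/credentials"            (GET the credentials list)
--   get_credential_etcd_key("cred-a", nil, "jack/credentials/cred-a")
--     --> "/consumers/jack/credentials/cred-a"     (all other methods)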
return resource.new({
name = "credentials",
kind = "credential",
schema = core.schema.credential,
checker = check_conf,
get_resource_etcd_key = get_credential_etcd_key,
unsupported_methods = {"post", "patch"}
})


@@ -0,0 +1,43 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
return resource.new({
name = "global_rules",
kind = "global rule",
schema = core.schema.global_rule,
checker = check_conf,
unsupported_methods = {"post"}
})


@@ -0,0 +1,526 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local get_uri_args = ngx.req.get_uri_args
local route = require("apisix.utils.router")
local plugin = require("apisix.plugin")
local standalone = require("apisix.admin.standalone")
local v3_adapter = require("apisix.admin.v3_adapter")
local utils = require("apisix.admin.utils")
local ngx = ngx
local get_method = ngx.req.get_method
local ngx_time = ngx.time
local ngx_timer_at = ngx.timer.at
local ngx_worker_id = ngx.worker.id
local tonumber = tonumber
local tostring = tostring
local str_lower = string.lower
local reload_event = "/apisix/admin/plugins/reload"
local ipairs = ipairs
local error = error
local type = type
local events
local MAX_REQ_BODY = 1024 * 1024 * 1.5 -- 1.5 MiB
local viewer_methods = {
get = true,
}
local resources = {
routes = require("apisix.admin.routes"),
services = require("apisix.admin.services"),
upstreams = require("apisix.admin.upstreams"),
consumers = require("apisix.admin.consumers"),
credentials = require("apisix.admin.credentials"),
schema = require("apisix.admin.schema"),
ssls = require("apisix.admin.ssl"),
plugins = require("apisix.admin.plugins"),
protos = require("apisix.admin.proto"),
global_rules = require("apisix.admin.global_rules"),
stream_routes = require("apisix.admin.stream_routes"),
plugin_metadata = require("apisix.admin.plugin_metadata"),
plugin_configs = require("apisix.admin.plugin_config"),
consumer_groups = require("apisix.admin.consumer_group"),
secrets = require("apisix.admin.secrets"),
}
local _M = {version = 0.4}
local router
local function check_token(ctx)
local local_conf = core.config.local_conf()
-- check if admin_key is required
if local_conf.deployment.admin.admin_key_required == false then
return true
end
local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key")
if not admin_key then
return true
end
local req_token = ctx.var.arg_api_key or ctx.var.http_x_api_key
or ctx.var.cookie_x_api_key
if not req_token then
return false, "missing apikey"
end
local admin
for i, row in ipairs(admin_key) do
if req_token == row.key then
admin = row
break
end
end
if not admin then
return false, "wrong apikey"
end
if admin.role == "viewer" and
not viewer_methods[str_lower(get_method())] then
return false, "invalid method for role viewer"
end
return true
end
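-- For reference, a minimal sketch of the config.yaml section this function
-- reads (the key values are placeholders, not real credentials):
--
--   deployment:
--     admin:
--       admin_key_required: true
--       admin_key:
--         - name: admin
--           key: <long-random-string>
--           role: admin
--         - name: viewer
--           key: <another-random-string>
--           role: viewer
--
-- The token may arrive as the api_key URI argument, the X-API-KEY request
-- header, or an x_api_key cookie, matching the three ctx.var lookups above.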
-- Set the api_ctx variable and check the admin API token; if the check fails,
-- the current request is interrupted and an error response is returned.
--
-- NOTE: This is a higher-level wrapper around the `check_token` function.
local function set_ctx_and_check_token()
local api_ctx = {}
core.ctx.set_vars_meta(api_ctx)
ngx.ctx.api_ctx = api_ctx
local ok, err = check_token(api_ctx)
if not ok then
core.log.warn("failed to check token: ", err)
core.response.exit(401, { error_msg = "failed to check token", description = err })
end
end
local function strip_etcd_resp(data)
if type(data) == "table"
and data.header ~= nil
and data.header.revision ~= nil
and data.header.raft_term ~= nil
then
-- strip etcd data
data.header = nil
data.responses = nil
data.succeeded = nil
if data.node then
data.node.createdIndex = nil
data.node.modifiedIndex = nil
end
data.count = nil
data.more = nil
data.prev_kvs = nil
if data.deleted then
-- We used to treat the type incorrectly. But for compatibility we follow
-- the existing type.
data.deleted = tostring(data.deleted)
end
end
return data
end
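-- For example, an etcd read such as
--   {header = {revision = 10, raft_term = 2}, count = 1,
--    node = {value = {...}, createdIndex = 5, modifiedIndex = 7}}
-- is reduced to {node = {value = {...}}} before being returned to the client
-- (field values here are illustrative).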
local function head()
core.response.exit(200)
end
local function run()
set_ctx_and_check_token()
local uri_segs = core.utils.split_uri(ngx.var.uri)
core.log.info("uri: ", core.json.delay_encode(uri_segs))
-- /apisix/admin/schema/route
local seg_res, seg_id = uri_segs[4], uri_segs[5]
local seg_sub_path = core.table.concat(uri_segs, "/", 6)
if seg_res == "schema" and seg_id == "plugins" then
-- /apisix/admin/schema/plugins/limit-count
seg_res, seg_id = uri_segs[5], uri_segs[6]
seg_sub_path = core.table.concat(uri_segs, "/", 7)
end
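-- For example, "/apisix/admin/routes/1" splits (with a leading empty segment)
-- into seg_res = "routes" and seg_id = "1", while the special case above maps
-- "/apisix/admin/schema/plugins/limit-count" to seg_res = "plugins" and
-- seg_id = "limit-count".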
if seg_res == "stream_routes" then
local local_conf = core.config.local_conf()
if local_conf.apisix.proxy_mode ~= "stream" and
local_conf.apisix.proxy_mode ~= "http&stream" then
core.log.warn("stream mode is disabled, can not add any stream ",
"routes")
core.response.exit(400, {error_msg = "stream mode is disabled, " ..
"can not add stream routes"})
end
end
if seg_res == "consumers" and #uri_segs >= 6 and uri_segs[6] == "credentials" then
seg_sub_path = seg_id .. "/" .. seg_sub_path
seg_res = uri_segs[6]
seg_id = uri_segs[7]
end
local resource = resources[seg_res]
if not resource then
core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
end
local method = str_lower(get_method())
if not resource[method] then
core.response.exit(404, {error_msg = "not found"})
end
local req_body, err = core.request.get_body(MAX_REQ_BODY)
if err then
core.log.error("failed to read request body: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if req_body then
local data, err = core.json.decode(req_body)
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err,
req_body = req_body})
end
req_body = data
end
local uri_args = ngx.req.get_uri_args() or {}
if uri_args.ttl then
if not tonumber(uri_args.ttl) then
core.response.exit(400, {error_msg = "invalid argument ttl: "
.. "should be a number"})
end
end
local code, data
if seg_res == "schema" or seg_res == "plugins" then
code, data = resource[method](seg_id, req_body, seg_sub_path, uri_args)
else
code, data = resource[method](resource, seg_id, req_body, seg_sub_path, uri_args)
end
if code then
if method == "get" and plugin.enable_data_encryption then
if seg_res == "consumers" or seg_res == "credentials" then
utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_CONSUMER)
elseif seg_res == "plugin_metadata" then
utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_METADATA)
else
utils.decrypt_params(plugin.decrypt_conf, data)
end
end
if v3_adapter.enable_v3() then
core.response.set_header("X-API-VERSION", "v3")
else
core.response.set_header("X-API-VERSION", "v2")
end
data = v3_adapter.filter(data, resource)
data = strip_etcd_resp(data)
core.response.exit(code, data)
end
end
local function get_plugins_list()
set_ctx_and_check_token()
local args = get_uri_args()
local subsystem = args["subsystem"]
-- If a subsystem is passed, it must be either http or stream.
-- If it is not passed, http is the default.
subsystem = subsystem or "http"
if subsystem == "http" or subsystem == "stream" then
local plugins = resources.plugins.get_plugins_list(subsystem)
core.response.exit(200, plugins)
end
core.response.exit(400, "invalid subsystem passed")
end
-- Handle unsupported request methods for the virtual "reload" plugin
local function unsupported_methods_reload_plugin()
set_ctx_and_check_token()
core.response.exit(405, {
error_msg = "please use PUT method to reload the plugins, "
.. get_method() .. " method is not allowed."
})
end
local function post_reload_plugins()
set_ctx_and_check_token()
local success, err = events:post(reload_event, get_method(), ngx_time())
if not success then
core.response.exit(503, err)
end
core.response.exit(200, "done")
end
local function plugins_eq(old, new)
local old_set = {}
for _, p in ipairs(old) do
old_set[p.name] = p
end
local new_set = {}
for _, p in ipairs(new) do
new_set[p.name] = p
end
return core.table.set_eq(old_set, new_set)
end
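-- plugins_eq keys both lists by plugin name before comparing, so ordering does
-- not matter; e.g. (plugin names are illustrative)
--   old = {{name = "limit-count"}, {name = "mqtt-proxy", stream = true}}
--   new = {{name = "mqtt-proxy", stream = true}, {name = "limit-count"}}
-- compare as equal.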
local function sync_local_conf_to_etcd(reset)
local local_conf = core.config.local_conf()
local plugins = {}
for _, name in ipairs(local_conf.plugins) do
core.table.insert(plugins, {
name = name,
})
end
for _, name in ipairs(local_conf.stream_plugins) do
core.table.insert(plugins, {
name = name,
stream = true,
})
end
if reset then
local res, err = core.etcd.get("/plugins")
if not res then
core.log.error("failed to get current plugins: ", err)
return
end
if res.status == 404 then
-- nothing needs to be reset
return
end
if res.status ~= 200 then
core.log.error("failed to get current plugins, status: ", res.status)
return
end
local stored_plugins = res.body.node.value
local revision = res.body.node.modifiedIndex
if plugins_eq(stored_plugins, plugins) then
core.log.info("plugins not changed, don't need to reset")
return
end
core.log.warn("sync local conf to etcd")
local res, err = core.etcd.atomic_set("/plugins", plugins, nil, revision)
if not res then
core.log.error("failed to set plugins: ", err)
end
return
end
core.log.warn("sync local conf to etcd")
-- store all plugin names under one key so that they can be updated atomically
local res, err = core.etcd.set("/plugins", plugins)
if not res then
core.log.error("failed to set plugins: ", err)
end
end
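-- The value written to /plugins is a single array covering both subsystems,
-- e.g. {{name = "limit-count"}, {name = "mqtt-proxy", stream = true}}, which
-- is why it can be replaced atomically with one etcd operation.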
local function reload_plugins(data, event, source, pid)
core.log.info("start to hot reload plugins")
plugin.load()
if ngx_worker_id() == 0 then
sync_local_conf_to_etcd()
end
end
local function schema_validate()
local uri_segs = core.utils.split_uri(ngx.var.uri)
core.log.info("uri: ", core.json.delay_encode(uri_segs))
local seg_res = uri_segs[6]
local resource = resources[seg_res]
if not resource then
core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
end
local req_body, err = core.request.get_body(MAX_REQ_BODY)
if err then
core.log.error("failed to read request body: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if req_body then
local data, err = core.json.decode(req_body)
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err,
req_body = req_body})
end
req_body = data
end
local ok, err = core.schema.check(resource.schema, req_body)
if ok then
core.response.exit(200)
end
core.response.exit(400, {error_msg = err})
end
local function standalone_run()
set_ctx_and_check_token()
return standalone.run()
end
local http_head_route = {
paths = [[/apisix/admin]],
methods = {"HEAD"},
handler = head,
}
local uri_route = {
http_head_route,
{
paths = [[/apisix/admin/*]],
methods = {"GET", "PUT", "POST", "DELETE", "PATCH"},
handler = run,
},
{
paths = [[/apisix/admin/plugins/list]],
methods = {"GET"},
handler = get_plugins_list,
},
{
paths = [[/apisix/admin/schema/validate/*]],
methods = {"POST"},
handler = schema_validate,
},
{
paths = reload_event,
methods = {"PUT"},
handler = post_reload_plugins,
},
-- Handle methods other than "PUT" on the plugins reload endpoint to inform the user
{
paths = reload_event,
methods = { "GET", "POST", "DELETE", "PATCH" },
handler = unsupported_methods_reload_plugin,
},
}
local standalone_uri_route = {
http_head_route,
{
paths = [[/apisix/admin/configs]],
methods = {"GET", "PUT"},
handler = standalone_run,
},
}
function _M.init_worker()
local local_conf = core.config.local_conf()
if not local_conf.apisix or not local_conf.apisix.enable_admin then
return
end
local is_yaml_config_provider = local_conf.deployment.config_provider == "yaml"
if is_yaml_config_provider then
router = route.new(standalone_uri_route)
standalone.init_worker()
else
router = route.new(uri_route)
end
-- register reload plugin handler
events = require("apisix.events")
events:register(reload_plugins, reload_event, "PUT")
if ngx_worker_id() == 0 then
-- check if admin_key is required
if local_conf.deployment.admin.admin_key_required == false then
core.log.warn("Admin key is bypassed! ",
"If you are deploying APISIX in a production environment, ",
"please enable `admin_key_required` and set a secure admin key!")
end
if is_yaml_config_provider then -- standalone mode does not need sync to etcd
return
end
local ok, err = ngx_timer_at(0, function(premature)
if premature then
return
end
-- try to reset the /plugins key to the current configuration
sync_local_conf_to_etcd(true)
end)
if not ok then
error("failed to sync local configure to etcd: " .. err)
end
end
end
function _M.get()
return router
end
return _M


@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local get_routes = require("apisix.router").http_routes
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function delete_checker(id)
local routes, routes_ver = get_routes()
if routes_ver and routes then
for _, route in ipairs(routes) do
if type(route) == "table" and route.value
and route.value.plugin_config_id
and tostring(route.value.plugin_config_id) == id then
return 400, {error_msg = "can not delete this plugin config,"
.. " route [" .. route.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "plugin_configs",
kind = "plugin config",
schema = core.schema.plugin_config,
checker = check_conf,
unsupported_methods = {"post"},
delete_checker = delete_checker
})


@@ -0,0 +1,83 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local pcall = pcall
local require = require
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local encrypt_conf = require("apisix.plugin").encrypt_conf
local injected_mark = "injected metadata_schema"
local function validate_plugin(name)
local pkg_name = "apisix.plugins." .. name
local ok, plugin_object = pcall(require, pkg_name)
if ok then
return true, plugin_object
end
pkg_name = "apisix.stream.plugins." .. name
return pcall(require, pkg_name)
end
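-- E.g. validate_plugin("limit-count") resolves "apisix.plugins.limit-count",
-- while a stream-only plugin such as "mqtt-proxy" falls through to
-- "apisix.stream.plugins.mqtt-proxy".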
local function check_conf(plugin_name, conf)
if not plugin_name then
return nil, {error_msg = "missing plugin name"}
end
local ok, plugin_object = validate_plugin(plugin_name)
if not ok then
return nil, {error_msg = "invalid plugin name"}
end
if not plugin_object.metadata_schema then
plugin_object.metadata_schema = {
type = "object",
['$comment'] = injected_mark,
properties = {},
}
end
local schema = plugin_object.metadata_schema
local ok, err
if schema['$comment'] == injected_mark
-- check_schema is not required; if it is missing, fall back to checking the schema directly
or not plugin_object.check_schema
then
ok, err = core.schema.check(schema, conf)
else
ok, err = plugin_object.check_schema(conf, core.schema.TYPE_METADATA)
end
encrypt_conf(plugin_name, conf, core.schema.TYPE_METADATA)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
return plugin_name
end
return resource.new({
name = "plugin_metadata",
kind = "plugin_metadata",
schema = core.schema.plugin_metadata,
checker = check_conf,
unsupported_methods = {"post", "patch"}
})


@@ -0,0 +1,139 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local check_schema = require("apisix.plugin").check_schema
local ipairs = ipairs
local table_sort = table.sort
local table_insert = table.insert
local get_uri_args = ngx.req.get_uri_args
local plugin_get_all = require("apisix.plugin").get_all
local plugin_get_http = require("apisix.plugin").get
local plugin_get_stream = require("apisix.plugin").get_stream
local encrypt_conf = require("apisix.plugin").encrypt_conf
local pairs = pairs
local _M = {}
function _M.check_schema(plugins_conf, schema_type)
local ok, err = check_schema(plugins_conf, schema_type, false)
if ok then
for name, conf in pairs(plugins_conf) do
encrypt_conf(name, conf, schema_type)
end
end
return ok, err
end
function _M.get(name)
local arg = get_uri_args()
-- If a subsystem is passed inside args, it must be one of: http / stream.
local subsystem = arg["subsystem"] or "http"
if subsystem ~= "http" and subsystem ~= "stream" then
return 400, {error_msg = "unsupported subsystem: "..subsystem}
end
-- the "all" argument is deprecated
if (arg and arg["all"] == "true") then
core.log.warn("query parameter \"all\" will be deprecated soon.")
local http_plugins, stream_plugins = plugin_get_all({
version = true,
priority = true,
schema = true,
metadata_schema = true,
consumer_schema = true,
type = true,
scope = true,
})
if arg["subsystem"] == "stream" then
return 200, stream_plugins
end
return 200, http_plugins
end
local plugin
if subsystem == "http" then
plugin = plugin_get_http(name)
else
plugin = plugin_get_stream(name)
end
if not plugin then
local err = "plugin not found in subsystem " .. subsystem
core.log.warn(err)
return 404, {error_msg = err}
end
local json_schema = plugin.schema
if arg and arg["schema_type"] == "consumer" then
json_schema = plugin.consumer_schema
end
if not json_schema then
return 400, {error_msg = "not found schema"}
end
return 200, json_schema
end
function _M.get_plugins_list(subsystem)
local http_plugins
local stream_plugins
if subsystem == "http" then
http_plugins = core.config.local_conf().plugins
else
stream_plugins = core.config.local_conf().stream_plugins
end
local priorities = {}
local success = {}
if http_plugins then
for i, name in ipairs(http_plugins) do
local plugin = plugin_get_http(name)
if plugin and plugin.priority then
priorities[name] = plugin.priority
table_insert(success, name)
end
end
end
if stream_plugins then
for i, name in ipairs(stream_plugins) do
local plugin = plugin_get_stream(name)
if plugin and plugin.priority then
priorities[name] = plugin.priority
table_insert(success, name)
end
end
end
local function cmp(x, y)
return priorities[x] > priorities[y]
end
table_sort(success, cmp)
return success
end
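-- The returned list is sorted by plugin priority, highest first; e.g. a plugin
-- with priority 2500 is listed before one with priority 1000.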
return _M


@@ -0,0 +1,111 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local ipairs = ipairs
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local compile_proto = require("apisix.plugins.grpc-transcode.proto").compile_proto
local tostring = tostring
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local ok, err = compile_proto(conf.content)
if not ok then
return nil, {error_msg = "invalid content: " .. err}
end
return true
end
local function check_proto_used(plugins, deleting, ptype, pid)
--core.log.info("check_proto_used plugins: ", core.json.delay_encode(plugins, true))
--core.log.info("check_proto_used deleting: ", deleting)
--core.log.info("check_proto_used ptype: ", ptype)
--core.log.info("check_proto_used pid: ", pid)
if plugins then
if type(plugins) == "table" and plugins["grpc-transcode"]
and plugins["grpc-transcode"].proto_id
and tostring(plugins["grpc-transcode"].proto_id) == deleting then
return false, {error_msg = "can not delete this proto, "
.. ptype .. " [" .. pid
.. "] is still using it now"}
end
end
return true
end
local function delete_checker(id)
core.log.info("proto delete: ", id)
local routes, routes_ver = get_routes()
core.log.info("routes: ", core.json.delay_encode(routes, true))
core.log.info("routes_ver: ", routes_ver)
if routes_ver and routes then
for _, route in ipairs(routes) do
core.log.info("proto delete route item: ", core.json.delay_encode(route, true))
if type(route) == "table" and route.value and route.value.plugins then
local ret, err = check_proto_used(route.value.plugins, id, "route", route.value.id)
if not ret then
return 400, err
end
end
end
end
core.log.info("proto delete route ref check pass: ", id)
local services, services_ver = get_services()
core.log.info("services: ", core.json.delay_encode(services, true))
core.log.info("services_ver: ", services_ver)
if services_ver and services then
for _, service in ipairs(services) do
if type(service) == "table" and service.value and service.value.plugins then
local ret, err = check_proto_used(service.value.plugins, id,
"service", service.value.id)
if not ret then
return 400, err
end
end
end
end
core.log.info("proto delete service ref check pass: ", id)
return nil, nil
end
return resource.new({
name = "protos",
kind = "proto",
schema = core.schema.proto,
checker = check_conf,
unsupported_methods = {"patch"},
delete_checker = delete_checker
})


@@ -0,0 +1,468 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local utils = require("apisix.admin.utils")
local apisix_ssl = require("apisix.ssl")
local apisix_consumer = require("apisix.consumer")
local setmetatable = setmetatable
local tostring = tostring
local ipairs = ipairs
local type = type
local _M = {
list_filter_fields = {},
}
local mt = {
__index = _M
}
local no_id_res = {
consumers = true,
plugin_metadata = true
}
local function split_typ_and_id(id, sub_path)
local uri_segs = core.utils.split_uri(sub_path)
local typ = id
local id = nil
if #uri_segs > 0 then
id = uri_segs[1]
end
return typ, id
end
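-- For example, for PUT /apisix/admin/secrets/vault/test1 the admin router
-- passes id = "vault" and sub_path = "test1", so this returns
-- typ = "vault", id = "test1" ("test1" being an arbitrary secret id).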
local function check_forbidden_properties(conf, forbidden_properties)
local not_allow_properties = "the property is forbidden: "
if conf then
for _, v in ipairs(forbidden_properties) do
if conf[v] then
return not_allow_properties .. " " .. v
end
end
if conf.upstream then
for _, v in ipairs(forbidden_properties) do
if conf.upstream[v] then
return not_allow_properties .. " upstream." .. v
end
end
end
if conf.plugins then
for _, v in ipairs(forbidden_properties) do
if conf.plugins[v] then
return not_allow_properties .. " plugins." .. v
end
end
end
end
return nil
end
function _M:check_conf(id, conf, need_id, typ, allow_time)
if self.name == "secrets" then
id = typ .. "/" .. id
end
-- check for missing configuration
if not conf then
return nil, {error_msg = "missing configurations"}
end
-- check the id when an id is required
if not no_id_res[self.name] then
id = id or conf.id
if need_id and not id then
return nil, {error_msg = "missing ".. self.kind .. " id"}
end
if not need_id and id then
return nil, {error_msg = "wrong ".. self.kind .. " id, do not need it"}
end
if need_id and conf.id and tostring(conf.id) ~= tostring(id) then
return nil, {error_msg = "wrong ".. self.kind .. " id"}
end
conf.id = id
end
-- check create time and update time
if not allow_time then
local forbidden_properties = {"create_time", "update_time"}
local err = check_forbidden_properties(conf, forbidden_properties)
if err then
return nil, {error_msg = err}
end
end
core.log.info("conf : ", core.json.delay_encode(conf))
-- check the resource own rules
if self.name ~= "secrets" then
core.log.info("schema: ", core.json.delay_encode(self.schema))
end
local ok, err = self.checker(id, conf, need_id, self.schema, typ)
if not ok then
return ok, err
else
if no_id_res[self.name] then
return ok
else
return need_id and id or true
end
end
end
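-- Return contract: nil plus an {error_msg = ...} table on failure; for
-- resources without ids (consumers, plugin_metadata) the checker's own truthy
-- result (e.g. the username); otherwise the resolved id, or true when no id
-- is required. _M:put below reuses this value as the etcd key segment.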
function _M:get(id, conf, sub_path)
if core.table.array_find(self.unsupported_methods, "get") then
return 405, {error_msg = "not supported `GET` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
key = key .. "/"
typ, id = split_typ_and_id(id, sub_path)
end
if id then
if self.name == "secrets" then
key = key .. typ
end
key = key .. "/" .. id
end
-- Some resources (consumers) have sub-resources (credentials);
-- the key format of a sub-resource differs from that of the main resource
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path)
end
local res, err = core.etcd.get(key, not id)
if not res then
core.log.error("failed to get ", self.kind, "[", key, "] from etcd: ", err)
return 503, {error_msg = err}
end
if self.name == "ssls" then
-- not return private key for security
if res.body and res.body.node and res.body.node.value then
res.body.node.value.key = nil
end
end
-- a consumers etcd range response will include credentials, so we need to filter them out
if self.name == "consumers" and res.body.list then
res.body.list = apisix_consumer.filter_consumers_list(res.body.list)
res.body.total = #res.body.list
end
utils.fix_count(res.body, id)
return res.status, res.body
end
function _M:post(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "post") then
return 405, {error_msg = "not supported `POST` method for " .. self.kind}
end
local id, err = self:check_conf(id, conf, false)
if not id then
return 400, err
end
if self.name == "ssls" then
-- encrypt private key
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
local key = "/" .. self.name
utils.inject_timestamp(conf)
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.push(key, conf, ttl)
if not res then
core.log.error("failed to post ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M:put(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "put") then
return 405, {error_msg = "not supported `PUT` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
typ, id = split_typ_and_id(id, sub_path)
key = key .. "/" .. typ
end
local need_id = not no_id_res[self.name]
local ok, err = self:check_conf(id, conf, need_id, typ)
if not ok then
return 400, err
end
if self.name ~= "secrets" then
id = ok
end
if self.name == "ssls" then
-- encrypt private key
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
key = key .. "/" .. id
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path, args)
end
if self.name == "credentials" then
local consumer_key = apisix_consumer.get_consumer_key_from_credential_key(key)
local res, err = core.etcd.get(consumer_key, false)
if not res then
return 503, {error_msg = err}
end
if res.status == 404 then
return res.status, {error_msg = "consumer not found"}
end
if res.status ~= 200 then
core.log.debug("failed to get consumer for the credential, credential key: ", key,
", consumer key: ", consumer_key, ", res.status: ", res.status)
return res.status, {error_msg = "failed to get the consumer"}
end
end
if self.name ~= "plugin_metadata" then
local ok, err = utils.inject_conf_with_prev_conf(self.kind, key, conf)
if not ok then
return 503, {error_msg = err}
end
else
conf.id = id
end
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.set(key, conf, ttl)
if not res then
core.log.error("failed to put ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
-- Keep the unused conf to make the args list consistent with other methods
function _M:delete(id, conf, sub_path, uri_args)
if core.table.array_find(self.unsupported_methods, "delete") then
return 405, {error_msg = "not supported `DELETE` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
typ, id = split_typ_and_id(id, sub_path)
end
if not id then
return 400, {error_msg = "missing " .. self.kind .. " id"}
end
-- core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
if self.name == "secrets" then
key = key .. "/" .. typ
end
key = key .. "/" .. id
if self.get_resource_etcd_key then
key = self.get_resource_etcd_key(id, conf, sub_path, uri_args)
end
if self.delete_checker and uri_args.force ~= "true" then
local code, err = self.delete_checker(id)
if err then
return code, err
end
end
if self.name == "consumers" then
local res, err = core.etcd.rmdir(key .. "/credentials/")
if not res then
return 503, {error_msg = err}
end
end
local res, err = core.etcd.delete(key)
if not res then
core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M:patch(id, conf, sub_path, args)
if core.table.array_find(self.unsupported_methods, "patch") then
return 405, {error_msg = "not supported `PATCH` method for " .. self.kind}
end
local key = "/" .. self.name
local typ = nil
if self.name == "secrets" then
local uri_segs = core.utils.split_uri(sub_path)
if #uri_segs < 1 then
return 400, {error_msg = "no secret id"}
end
typ = id
id = uri_segs[1]
sub_path = core.table.concat(uri_segs, "/", 2)
end
if not id then
return 400, {error_msg = "missing " .. self.kind .. " id"}
end
if self.name == "secrets" then
key = key .. "/" .. typ
end
key = key .. "/" .. id
if conf == nil then
return 400, {error_msg = "missing new configuration"}
end
if not sub_path or sub_path == "" then
if type(conf) ~= "table" then
return 400, {error_msg = "invalid configuration"}
end
end
local res_old, err = core.etcd.get(key)
if not res_old then
core.log.error("failed to get ", self.kind, " [", key, "] in etcd: ", err)
return 503, {error_msg = err}
end
if res_old.status ~= 200 then
return res_old.status, res_old.body
end
core.log.info("key: ", key, " old value: ", core.json.delay_encode(res_old, true))
local node_value = res_old.body.node.value
local modified_index = res_old.body.node.modifiedIndex
if sub_path and sub_path ~= "" then
if self.name == "ssls" then
if sub_path == "key" then
conf = apisix_ssl.aes_encrypt_pkey(conf)
elseif sub_path == "keys" then
for i = 1, #conf do
conf[i] = apisix_ssl.aes_encrypt_pkey(conf[i])
end
end
end
local code, err, node_val = core.table.patch(node_value, sub_path, conf)
node_value = node_val
if code then
return code, {error_msg = err}
end
utils.inject_timestamp(node_value, nil, true)
else
if self.name == "ssls" then
if conf.key then
conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
end
if conf.keys then
for i = 1, #conf.keys do
conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
end
end
end
node_value = core.table.merge(node_value, conf)
utils.inject_timestamp(node_value, nil, conf)
end
core.log.info("new conf: ", core.json.delay_encode(node_value, true))
local ok, err = self:check_conf(id, node_value, true, typ, true)
if not ok then
return 400, err
end
local ttl = nil
if args then
ttl = args.ttl
end
local res, err = core.etcd.atomic_set(key, node_value, ttl, modified_index)
if not res then
core.log.error("failed to set new ", self.kind, "[", key, "] to etcd: ", err)
return 503, {error_msg = err}
end
return res.status, res.body
end
function _M.new(opt)
return setmetatable(opt, mt)
end
return _M


@@ -0,0 +1,184 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local expr = require("resty.expr.v1")
local core = require("apisix.core")
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local type = type
local loadstring = loadstring
local ipairs = ipairs
local jp = require("jsonpath")
local function validate_post_arg(node)
if type(node) ~= "table" then
return true
end
-- Handle post_arg conditions
if #node >= 3 and type(node[1]) == "string" and node[1]:find("^post_arg%.") then
local key = node[1]
local json_path = "$." .. key:sub(10) -- Remove the 9-character "post_arg." prefix
local _, err = jp.parse(json_path)
if err then
return false, err
end
return true
end
for _, child in ipairs(node) do
local ok, err = validate_post_arg(child)
if not ok then
return false, err
end
end
return true
end
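-- For instance, a vars entry such as {"post_arg.user.name", "==", "bob"}
-- (values illustrative) is checked by parsing the JSONPath "$.user.name",
-- so an invalid path fails here instead of at request-matching time.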
local function check_conf(id, conf, need_id, schema)
if conf.host and conf.hosts then
return nil, {error_msg = "only one of host or hosts is allowed"}
end
if conf.remote_addr and conf.remote_addrs then
return nil, {error_msg = "only one of remote_addr or remote_addrs is "
.. "allowed"}
end
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local upstream_conf = conf.upstream
if upstream_conf then
local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
if not ok then
return nil, {error_msg = err}
end
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
local service_id = conf.service_id
if service_id then
local key = "/services/" .. service_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "], "
.. "response code: " .. res.status}
end
end
local plugin_config_id = conf.plugin_config_id
if plugin_config_id then
local key = "/plugin_configs/" .. plugin_config_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch plugin config info by "
.. "plugin config id [" .. plugin_config_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch plugin config info by "
.. "plugin config id [" .. plugin_config_id .. "], "
.. "response code: " .. res.status}
end
end
if conf.plugins then
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
end
if conf.vars then
ok, err = expr.new(conf.vars)
if not ok then
return nil, {error_msg = "failed to validate the 'vars' expression: " .. err}
end
end
ok, err = validate_post_arg(conf.vars)
if not ok then
return nil, {error_msg = "failed to validate the 'vars' expression: " ..
err}
end
if conf.filter_func then
local func, err = loadstring("return " .. conf.filter_func)
if not func then
return nil, {error_msg = "failed to load 'filter_func' string: "
.. err}
end
if type(func()) ~= "function" then
return nil, {error_msg = "'filter_func' should be a function"}
end
end
if conf.script then
local obj, err = loadstring(conf.script)
if not obj then
return nil, {error_msg = "failed to load 'script' string: "
.. err}
end
if type(obj()) ~= "table" then
return nil, {error_msg = "'script' should be a Lua object"}
end
end
return true
end
return resource.new({
name = "routes",
kind = "route",
schema = core.schema.route,
checker = check_conf,
list_filter_fields = {
service_id = true,
upstream_id = true,
},
})


@@ -0,0 +1,35 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local _M = {
version = 0.1,
}
function _M.get(name)
local json_schema = core.schema[name]
core.log.info("schema: ", core.json.delay_encode(core.schema, true))
if not json_schema then
return 400, {error_msg = "not found schema: " .. name}
end
return 200, json_schema
end
return _M


@@ -0,0 +1,45 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local pcall = pcall
local function check_conf(id, conf, need_id, schema, typ)
local ok, secret_manager = pcall(require, "apisix.secret." .. typ)
if not ok then
return false, {error_msg = "invalid secret manager: " .. typ}
end
local ok, err = core.schema.check(secret_manager.schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
return true
end
return resource.new({
name = "secrets",
kind = "secret",
checker = check_conf,
unsupported_methods = {"post"}
})


@@ -0,0 +1,128 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local get_routes = require("apisix.router").http_routes
local get_stream_routes = require("apisix.router").stream_routes
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local schema_plugin = require("apisix.admin.plugins").check_schema
local tostring = tostring
local ipairs = ipairs
local type = type
local loadstring = loadstring
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
if need_id and not id then
return nil, {error_msg = "wrong type of service id"}
end
local upstream_conf = conf.upstream
if upstream_conf then
local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
if not ok then
return nil, {error_msg = err}
end
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
if conf.plugins then
local ok, err = schema_plugin(conf.plugins)
if not ok then
return nil, {error_msg = err}
end
end
if conf.script then
local obj, err = loadstring(conf.script)
if not obj then
return nil, {error_msg = "failed to load 'script' string: "
.. err}
end
if type(obj()) ~= "table" then
return nil, {error_msg = "'script' should be a Lua object"}
end
end
return true
end
local function delete_checker(id)
local routes, routes_ver = get_routes()
core.log.info("routes: ", core.json.delay_encode(routes, true))
core.log.info("routes_ver: ", routes_ver)
if routes_ver and routes then
for _, route in ipairs(routes) do
if type(route) == "table" and route.value
and route.value.service_id
and tostring(route.value.service_id) == id then
return 400, {error_msg = "can not delete this service directly,"
.. " route [" .. route.value.id
.. "] is still using it now"}
end
end
end
local stream_routes, stream_routes_ver = get_stream_routes()
core.log.info("stream_routes: ", core.json.delay_encode(stream_routes, true))
core.log.info("stream_routes_ver: ", stream_routes_ver)
if stream_routes_ver and stream_routes then
for _, route in ipairs(stream_routes) do
if type(route) == "table" and route.value
and route.value.service_id
and tostring(route.value.service_id) == id then
return 400, {error_msg = "can not delete this service directly,"
.. " stream_route [" .. route.value.id
.. "] is still using it now"}
end
end
end
return nil, nil
end
return resource.new({
name = "services",
kind = "service",
schema = core.schema.service,
checker = check_conf,
delete_checker = delete_checker
})


@@ -0,0 +1,37 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local apisix_ssl = require("apisix.ssl")
local function check_conf(id, conf, need_id, schema)
local ok, err = apisix_ssl.check_ssl_conf(false, conf)
if not ok then
return nil, {error_msg = err}
end
return need_id and id or true
end
return resource.new({
name = "ssls",
kind = "ssl",
schema = core.schema.ssl,
checker = check_conf
})


@@ -0,0 +1,339 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local pairs = pairs
local ipairs = ipairs
local str_lower = string.lower
local ngx = ngx
local get_method = ngx.req.get_method
local shared_dict = ngx.shared["standalone-config"]
local table_insert = table.insert
local table_new = require("table.new")
local yaml = require("lyaml")
local events = require("apisix.events")
local core = require("apisix.core")
local config_yaml = require("apisix.core.config_yaml")
local check_schema = require("apisix.core.schema").check
local tbl_deepcopy = require("apisix.core.table").deepcopy
local EVENT_UPDATE = "standalone-api-configuration-update"
local _M = {}
local function check_duplicate(item, key, id_set)
local identifier, identifier_type
if key == "consumers" then
identifier = item.id or item.username
identifier_type = item.id and "credential id" or "username"
else
identifier = item.id
identifier_type = "id"
end
if id_set[identifier] then
return true, "found duplicate " .. identifier_type .. " " .. identifier .. " in " .. key
end
id_set[identifier] = true
return false
end
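-- Editor's note: a self-contained sketch of the deduplication rule above —
-- consumers are keyed by credential id when present, otherwise by username;
-- the sample items are hypothetical.
local id_set = {}
local items = {
    { username = "alice" },
    { id = "alice/credentials/key-1" },   -- a credential owned by alice
    { username = "alice" },               -- duplicate username
}
for _, item in ipairs(items) do
    local identifier = item.id or item.username
    local identifier_type = item.id and "credential id" or "username"
    if id_set[identifier] then
        print("found duplicate " .. identifier_type .. " "
              .. identifier .. " in consumers")
    else
        id_set[identifier] = true
    end
end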
local function get_config()
local config = shared_dict:get("config")
if not config then
return nil, "not found"
end
local err
config, err = core.json.decode(config)
if not config then
return nil, "failed to decode json: " .. err
end
return config
end
local function update_and_broadcast_config(apisix_yaml)
local raw, err = core.json.encode(apisix_yaml)
if not raw then
core.log.error("failed to encode json: ", err)
return nil, "failed to encode json: " .. err
end
if shared_dict then
-- the worker that handles Admin API calls is responsible for writing the shared dict
local ok, err = shared_dict:set("config", raw)
if not ok then
return nil, "failed to save config to shared dict: " .. err
end
core.log.info("standalone config updated: ", raw)
else
core.log.crit(config_yaml.ERR_NO_SHARED_DICT)
end
return events:post(EVENT_UPDATE, EVENT_UPDATE)
end
local function update(ctx)
local content_type = core.request.header(nil, "content-type") or "application/json"
-- read the request body
local req_body, err = core.request.get_body()
if err then
return core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
if not req_body or #req_body <= 0 then
return core.response.exit(400, {error_msg = "invalid request body: empty request body"})
end
-- parse the request body
local data
if core.string.has_prefix(content_type, "application/yaml") then
data = yaml.load(req_body, { all = false })
if not data or type(data) ~= "table" then
err = "invalid yaml request body"
end
else
data, err = core.json.decode(req_body)
end
if err then
core.log.error("invalid request body: ", req_body, " err: ", err)
core.response.exit(400, {error_msg = "invalid request body: " .. err})
end
req_body = data
local config, err = get_config()
if not config then
if err ~= "not found" then
core.log.error("failed to get config from shared dict: ", err)
return core.response.exit(500, {
error_msg = "failed to get config from shared dict: " .. err
})
end
end
-- check input by jsonschema
local apisix_yaml = {}
local created_objs = config_yaml.fetch_all_created_obj()
for key, obj in pairs(created_objs) do
local conf_version_key = obj.conf_version_key
local conf_version = config and config[conf_version_key] or obj.conf_version
local items = req_body[key]
local new_conf_version = req_body[conf_version_key]
if not new_conf_version then
new_conf_version = conf_version + 1
else
if type(new_conf_version) ~= "number" then
return core.response.exit(400, {
error_msg = conf_version_key .. " must be a number",
})
end
if new_conf_version < conf_version then
return core.response.exit(400, {
error_msg = conf_version_key ..
" must be greater than or equal to (" .. conf_version .. ")",
})
end
end
apisix_yaml[conf_version_key] = new_conf_version
if new_conf_version == conf_version then
apisix_yaml[key] = config and config[key]
elseif items and #items > 0 then
apisix_yaml[key] = table_new(#items, 0)
local item_schema = obj.item_schema
local item_checker = obj.checker
local id_set = {}
for index, item in ipairs(items) do
local item_temp = tbl_deepcopy(item)
local valid, err
-- Lua arrays are 1-based; report a 0-based index to match the JSON array in the request
local err_prefix = "invalid " .. key .. " at index " .. (index - 1) .. ", err: "
if item_schema then
valid, err = check_schema(obj.item_schema, item_temp)
if not valid then
core.log.error(err_prefix, err)
core.response.exit(400, {error_msg = err_prefix .. err})
end
end
if item_checker then
local item_checker_key
if item.id then
-- credential need to check key
item_checker_key = "/" .. key .. "/" .. item_temp.id
end
valid, err = item_checker(item_temp, item_checker_key)
if not valid then
core.log.error(err_prefix, err)
core.response.exit(400, {error_msg = err_prefix .. err})
end
end
-- prevent updating resource with the same ID
-- (e.g., service ID or other resource IDs) in a single request
local duplicated, err = check_duplicate(item, key, id_set)
if duplicated then
core.log.error(err)
core.response.exit(400, { error_msg = err })
end
table_insert(apisix_yaml[key], item)
end
end
end
local ok, err = update_and_broadcast_config(apisix_yaml)
if not ok then
core.response.exit(500, err)
end
return core.response.exit(202)
end
local function get(ctx)
local accept = core.request.header(nil, "accept") or "application/json"
local want_yaml_resp = core.string.has_prefix(accept, "application/yaml")
local config, err = get_config()
if not config then
if err ~= "not found" then
core.log.error("failed to get config from shared dict: ", err)
return core.response.exit(500, {
error_msg = "failed to get config from shared dict: " .. err
})
end
config = {}
local created_objs = config_yaml.fetch_all_created_obj()
for _, obj in pairs(created_objs) do
config[obj.conf_version_key] = obj.conf_version
end
end
local resp, err
if want_yaml_resp then
core.response.set_header("Content-Type", "application/yaml")
resp = yaml.dump({ config })
if not resp then
err = "failed to encode yaml"
else
-- strip the leading "---" and trailing "..." that yaml.dump()
-- adds for multi-document output; also guards against a nil resp
local m = ngx.re.match(resp, [[^---\s*([\s\S]*?)\s*\.\.\.\s*$]], "jo")
if m and m[1] then
resp = m[1]
end
end
else
core.response.set_header("Content-Type", "application/json")
resp, err = core.json.encode(config, true)
if not resp then
err = "failed to encode json: " .. err
end
end
if not resp then
return core.response.exit(500, {error_msg = err})
end
return core.response.exit(200, resp)
end
function _M.run()
local ctx = ngx.ctx.api_ctx
local method = str_lower(get_method())
if method == "put" then
return update(ctx)
else
return get(ctx)
end
end
local patch_schema
do
local resource_schema = {
"proto",
"global_rule",
"route",
"service",
"upstream",
"consumer",
"consumer_group",
"credential",
"ssl",
"plugin_config",
}
local function attach_modifiedIndex_schema(name)
local schema = core.schema[name]
if not schema then
core.log.error("schema for ", name, " not found")
return
end
if schema.properties and not schema.properties.modifiedIndex then
schema.properties.modifiedIndex = {
type = "integer",
}
end
end
local function patch_credential_schema()
local credential_schema = core.schema["credential"]
if credential_schema and credential_schema.properties then
credential_schema.properties.id = {
type = "string",
minLength = 15,
maxLength = 128,
pattern = [[^[a-zA-Z0-9-_]+/credentials/[a-zA-Z0-9-_.]+$]],
}
end
end
function patch_schema()
-- attach modifiedIndex schema to all resource schemas
for _, name in ipairs(resource_schema) do
attach_modifiedIndex_schema(name)
end
-- patch credential schema
patch_credential_schema()
end
end
function _M.init_worker()
local function update_config()
local config, err = shared_dict:get("config")
if not config then
core.log.error("failed to get config from shared dict: ", err)
return
end
config, err = core.json.decode(config)
if not config then
core.log.error("failed to decode json: ", err)
return
end
config_yaml._update_config(config)
end
events:register(update_config, EVENT_UPDATE, EVENT_UPDATE)
patch_schema()
end
return _M
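-- Editor's note: a hedged sketch of the conf_version rules enforced by
-- update() above, shown as a hypothetical PUT body (a Lua table standing in
-- for the JSON/YAML payload):
local body = {
    routes_conf_version = 12,  -- must be >= the stored version; if equal,
                               -- the stored routes win and the list below is
                               -- ignored; if omitted, the server bumps the
                               -- stored version by one and applies the list
    routes = {
        { id = "r1", uri = "/hello",
          upstream = { type = "roundrobin",
                       nodes = { ["127.0.0.1:1980"] = 1 } } },
    },
}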

View File

@@ -0,0 +1,81 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resource = require("apisix.admin.resource")
local stream_route_checker = require("apisix.stream.router.ip_port").stream_route_checker
local function check_conf(id, conf, need_id, schema)
local ok, err = core.schema.check(schema, conf)
if not ok then
return nil, {error_msg = "invalid configuration: " .. err}
end
local upstream_id = conf.upstream_id
if upstream_id then
local key = "/upstreams/" .. upstream_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch upstream info by "
.. "upstream id [" .. upstream_id .. "], "
.. "response code: " .. res.status}
end
end
local service_id = conf.service_id
if service_id then
local key = "/services/" .. service_id
local res, err = core.etcd.get(key)
if not res then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "]: "
.. err}
end
if res.status ~= 200 then
return nil, {error_msg = "failed to fetch service info by "
.. "service id [" .. service_id .. "], "
.. "response code: " .. res.status}
end
end
local ok, err = stream_route_checker(conf, true)
if not ok then
return nil, {error_msg = err}
end
return true
end
return resource.new({
name = "stream_routes",
kind = "stream route",
schema = core.schema.stream_route,
checker = check_conf,
unsupported_methods = { "patch" },
list_filter_fields = {
service_id = true,
upstream_id = true,
},
})

View File

@@ -0,0 +1,134 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_util = require("apisix.core.config_util")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local get_plugin_configs = require("apisix.plugin_config").plugin_configs
local get_consumers = require("apisix.consumer").consumers
local get_consumer_groups = require("apisix.consumer_group").consumer_groups
local get_global_rules = require("apisix.global_rules").global_rules
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local tostring = tostring
local ipairs = ipairs
local function check_conf(id, conf, need_id)
local ok, err = apisix_upstream.check_upstream_conf(conf)
if not ok then
return nil, {error_msg = err}
end
return true
end
local function up_id_in_plugins(plugins, up_id)
if plugins and plugins["traffic-split"]
and plugins["traffic-split"].rules then
for _, rule in ipairs(plugins["traffic-split"].rules) do
local plugin_upstreams = rule.weighted_upstreams
for _, plugin_upstream in ipairs(plugin_upstreams) do
if plugin_upstream.upstream_id
and tostring(plugin_upstream.upstream_id) == up_id then
return true
end
end
end
return false
end
end
local function check_resources_reference(resources, up_id,
only_check_plugin, resources_name)
if resources then
for _, resource in config_util.iterate_values(resources) do
if resource and resource.value then
if up_id_in_plugins(resource.value.plugins, up_id) then
return {error_msg = "can not delete this upstream,"
.. " plugin in "
.. resources_name .. " ["
.. resource.value.id
.. "] is still using it now"}
end
if not only_check_plugin and resource.value.upstream_id
and tostring(resource.value.upstream_id) == up_id then
return {error_msg = "can not delete this upstream, "
.. resources_name .. " [" .. resource.value.id
.. "] is still using it now"}
end
end
end
end
end
local function delete_checker(id)
local routes = get_routes()
local err_msg = check_resources_reference(routes, id, false, "route")
if err_msg then
return 400, err_msg
end
local services, services_ver = get_services()
core.log.info("services: ", core.json.delay_encode(services, true))
core.log.info("services_ver: ", services_ver)
local err_msg = check_resources_reference(services, id, false, "service")
if err_msg then
return 400, err_msg
end
local plugin_configs = get_plugin_configs()
local err_msg = check_resources_reference(plugin_configs, id, true, "plugin_config")
if err_msg then
return 400, err_msg
end
local consumers = get_consumers()
local err_msg = check_resources_reference(consumers, id, true, "consumer")
if err_msg then
return 400, err_msg
end
local consumer_groups = get_consumer_groups()
local err_msg = check_resources_reference(consumer_groups, id, true, "consumer_group")
if err_msg then
return 400, err_msg
end
local global_rules = get_global_rules()
err_msg = check_resources_reference(global_rules, id, true, "global_rules")
if err_msg then
return 400, err_msg
end
return nil, nil
end
return resource.new({
name = "upstreams",
kind = "upstream",
schema = core.schema.upstream,
checker = check_conf,
delete_checker = delete_checker
})
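-- Editor's note: a self-contained sketch of the traffic-split reference
-- walk performed by up_id_in_plugins() above; the plugin table is
-- hypothetical.
local plugins = {
    ["traffic-split"] = {
        rules = {
            { weighted_upstreams = {
                { upstream_id = "u1", weight = 60 },
                { weight = 40 },   -- no id: the route's own upstream
            } },
        },
    },
}
local function references_upstream(plugins, up_id)
    local ts = plugins and plugins["traffic-split"]
    if not (ts and ts.rules) then
        return false
    end
    for _, rule in ipairs(ts.rules) do
        for _, wu in ipairs(rule.weighted_upstreams) do
            if wu.upstream_id and tostring(wu.upstream_id) == up_id then
                return true
            end
        end
    end
    return false
end
print(references_upstream(plugins, "u1"))   --> true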

View File

@@ -0,0 +1,113 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local ngx_time = ngx.time
local tonumber = tonumber
local ipairs = ipairs
local pairs = pairs
local _M = {}
local function inject_timestamp(conf, prev_conf, patch_conf)
if not conf.create_time then
if prev_conf and (prev_conf.node or prev_conf.list).value.create_time then
conf.create_time = (prev_conf.node or prev_conf.list).value.create_time
else
-- As we don't know existent data's create_time, we have to pretend
-- they are created now.
conf.create_time = ngx_time()
end
end
if not conf.update_time or
-- For PATCH request, the modification is passed as 'patch_conf'
-- If the sub path is used, the 'patch_conf' will be a placeholder `true`
(patch_conf and (patch_conf == true or patch_conf.update_time == nil))
then
-- reset the update_time if:
-- 1. PATCH request, with sub path
-- 2. PATCH request, update_time not given
-- 3. Other request, update_time not given
conf.update_time = ngx_time()
end
end
_M.inject_timestamp = inject_timestamp
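-- Editor's note: a minimal sketch of the update_time reset rules listed
-- above, with os.time() standing in for ngx.time(); inject_update_time is a
-- hypothetical cut-down of inject_timestamp().
local function inject_update_time(conf, patch_conf)
    if not conf.update_time
       or (patch_conf and (patch_conf == true or patch_conf.update_time == nil))
    then
        conf.update_time = os.time()
    end
end
local conf = { update_time = 100 }
inject_update_time(conf, { plugins = {} })  -- PATCH without update_time
print(conf.update_time ~= 100)              --> true: it was reset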
function _M.inject_conf_with_prev_conf(kind, key, conf)
local res, err = core.etcd.get(key)
if not res or (res.status ~= 200 and res.status ~= 404) then
core.log.error("failed to get " .. kind .. "[", key, "] from etcd: ", err or res.status)
return nil, err
end
if res.status == 404 then
inject_timestamp(conf)
else
inject_timestamp(conf, res.body)
end
return true
end
-- fix_count makes the "count" field returned by etcd reasonable
function _M.fix_count(body, id)
if body.count then
if not id then
-- remove the count of placeholder (init_dir)
body.count = tonumber(body.count) - 1
else
body.count = tonumber(body.count)
end
end
end
function _M.decrypt_params(decrypt_func, body, schema_type)
-- list
if body.list then
for _, route in ipairs(body.list) do
if route.value and route.value.plugins then
for name, conf in pairs(route.value.plugins) do
decrypt_func(name, conf, schema_type)
end
end
end
return
end
-- node
local plugins = body.node and body.node.value
and body.node.value.plugins
if plugins then
for name, conf in pairs(plugins) do
decrypt_func(name, conf, schema_type)
end
end
-- metadata
if schema_type == core.schema.TYPE_METADATA then
local conf = body.node and body.node.value
if conf then
decrypt_func(conf.name, conf, schema_type)
end
end
end
return _M

View File

@@ -0,0 +1,249 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local pairs = pairs
local tonumber = tonumber
local ngx = ngx
local re_find = ngx.re.find
local fetch_local_conf = require("apisix.core.config_local").local_conf
local try_read_attr = require("apisix.core.table").try_read_attr
local deepcopy = require("apisix.core.table").deepcopy
local log = require("apisix.core.log")
local request = require("apisix.core.request")
local response = require("apisix.core.response")
local table = require("apisix.core.table")
local _M = {}
local admin_api_version
local function enable_v3()
if admin_api_version then
if admin_api_version == "v3" then
return true
end
if admin_api_version == "default" then
return false
end
end
local local_conf, err = fetch_local_conf()
if not local_conf then
admin_api_version = "default"
log.error("failed to fetch local conf: ", err)
return false
end
local api_ver = try_read_attr(local_conf, "deployment", "admin", "admin_api_version")
if api_ver ~= "v3" then
admin_api_version = "default"
return false
end
admin_api_version = api_ver
return true
end
_M.enable_v3 = enable_v3
function _M.to_v3(body, action)
if not enable_v3() then
body.action = action
end
end
function _M.to_v3_list(body)
if not enable_v3() then
return
end
if body.node.dir then
body.list = body.node.nodes
body.node = nil
end
end
local function sort(l, r)
return l.createdIndex < r.createdIndex
end
local function pagination(body, args)
args.page = tonumber(args.page)
args.page_size = tonumber(args.page_size)
if not args.page or not args.page_size then
return
end
if args.page_size < 10 or args.page_size > 500 then
return response.exit(400, "page_size must be between 10 and 500")
end
if not args.page or args.page < 1 then
-- default page is 1
args.page = 1
end
local list = body.list
-- sort nodes by their createdIndex
table.sort(list, sort)
local to = args.page * args.page_size
local from = to - args.page_size + 1
local res = table.new(20, 0)
for i = from, to do
if list[i] then
res[i - from + 1] = list[i]
end
end
body.list = res
end
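-- Editor's note: the page window arithmetic used above, isolated into a
-- hypothetical helper for illustration.
local function page_window(page, page_size, total)
    local to = page * page_size
    local from = to - page_size + 1
    if from > total then
        return nil            -- past the last page: empty result
    end
    return from, math.min(to, total)
end
print(page_window(2, 10, 25))   --> 11   20
print(page_window(3, 10, 25))   --> 21   25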
local function _filter(item, args, resource)
if not args.filter then
return true
end
local filters, err = ngx.decode_args(args.filter or "", 100)
if not filters then
log.error("failed to decode filter args: ", err)
return false
end
for key, value in pairs(filters) do
if not resource.list_filter_fields[key] then
log.warn("filter field '", key, "' is not supported by resource: ", resource.name)
goto CONTINUE
end
if not item[key] then
return false
end
if type(value) == "table" then
value = value[#value] -- get the last value in the table
end
if item[key] ~= value then
return false
end
::CONTINUE::
end
return true
end
local function filter(body, args, resource)
for i = #body.list, 1, -1 do
local name_matched = true
local label_matched = true
local uri_matched = true
if args.name then
name_matched = false
local matched = re_find(body.list[i].value.name, args.name, "jo")
if matched then
name_matched = true
end
end
if args.label then
label_matched = false
if body.list[i].value.labels then
for k, _ in pairs(body.list[i].value.labels) do
if k == args.label then
label_matched = true
break
end
end
end
end
if args.uri then
uri_matched = false
if body.list[i].value.uri then
local matched = re_find(body.list[i].value.uri, args.uri, "jo")
if matched then
uri_matched = true
end
end
if body.list[i].value.uris then
for _, uri in pairs(body.list[i].value.uris) do
if re_find(uri, args.uri, "jo") then
uri_matched = true
break
end
end
end
end
if not name_matched or not label_matched or not uri_matched
or not _filter(body.list[i].value, args, resource) then
table.remove(body.list, i)
end
end
end
function _M.filter(body, resource)
if not enable_v3() then
return body
end
local args = request.get_uri_args()
local processed_body = deepcopy(body)
if processed_body.deleted then
processed_body.node = nil
end
-- strip node wrapping for single query, create, and update scenarios.
if processed_body.node then
processed_body = processed_body.node
end
-- filter and paging logic for list query only
if processed_body.list then
filter(processed_body, args, resource)
-- calculate the total amount of filtered data
processed_body.total = processed_body.list and #processed_body.list or 0
pagination(processed_body, args)
-- remove the count field returned by etcd: after filtering and
-- pagination it no longer reflects the data actually returned,
-- so keeping it would only mislead
processed_body.count = nil
end
return processed_body
end
return _M

View File

@@ -0,0 +1,116 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local plugin_mod = require("apisix.plugin")
local core = require("apisix.core")
local ipairs = ipairs
local ngx_header = ngx.header
local type = type
local _M = {}
local match_opts = {}
local has_route_not_under_apisix
local fetch_api_router
do
local routes = {}
function fetch_api_router()
core.table.clear(routes)
has_route_not_under_apisix = false
for _, plugin in ipairs(plugin_mod.plugins) do
local api_fun = plugin.api
if api_fun then
local api_routes = api_fun()
core.log.debug("fetched api routes: ",
core.json.delay_encode(api_routes, true))
for _, route in ipairs(api_routes) do
if route.uri == nil then
core.log.error("got nil uri in api route: ",
core.json.delay_encode(route, true))
break
end
local typ_uri = type(route.uri)
if not has_route_not_under_apisix then
if typ_uri == "string" then
if not core.string.has_prefix(route.uri, "/apisix/") then
has_route_not_under_apisix = true
end
else
for _, uri in ipairs(route.uri) do
if not core.string.has_prefix(uri, "/apisix/") then
has_route_not_under_apisix = true
break
end
end
end
end
core.table.insert(routes, {
methods = route.methods,
paths = route.uri,
handler = function (api_ctx)
local code, body = route.handler(api_ctx)
if code or body then
if type(body) == "table" and ngx_header["Content-Type"] == nil then
core.response.set_header("Content-Type", "application/json")
end
core.response.exit(code, body)
end
end
})
end
end
end
return router.new(routes)
end
end -- do
function _M.has_route_not_under_apisix()
if has_route_not_under_apisix == nil then
return true
end
return has_route_not_under_apisix
end
function _M.match(api_ctx)
local api_router = core.lrucache.global("api_router", plugin_mod.load_times, fetch_api_router)
if not api_router then
core.log.error("failed to fetch valid api router")
return false
end
core.table.clear(match_opts)
match_opts.method = api_ctx.var.request_method
local ok = api_router:dispatch(api_ctx.var.uri, match_opts, api_ctx)
return ok
end
return _M

View File

@@ -0,0 +1,400 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local balancer = require("ngx.balancer")
local core = require("apisix.core")
local priority_balancer = require("apisix.balancer.priority")
local apisix_upstream = require("apisix.upstream")
local ipairs = ipairs
local is_http = ngx.config.subsystem == "http"
local enable_keepalive = balancer.enable_keepalive and is_http
local set_more_tries = balancer.set_more_tries
local get_last_failure = balancer.get_last_failure
local set_timeouts = balancer.set_timeouts
local ngx_now = ngx.now
local str_byte = string.byte
local module_name = "balancer"
local pickers = {}
local lrucache_server_picker = core.lrucache.new({
ttl = 300, count = 256
})
local lrucache_addr = core.lrucache.new({
ttl = 300, count = 1024 * 4
})
local _M = {
version = 0.2,
name = module_name,
}
local function transform_node(new_nodes, node)
if not new_nodes._priority_index then
new_nodes._priority_index = {}
end
if not new_nodes[node.priority] then
new_nodes[node.priority] = {}
core.table.insert(new_nodes._priority_index, node.priority)
end
new_nodes[node.priority][node.host .. ":" .. node.port] = node.weight
return new_nodes
end
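-- Editor's note: a standalone run of the grouping above on two hypothetical
-- nodes with different priorities.
local nodes = {
    { host = "10.0.0.1", port = 80, weight = 2, priority = 0 },
    { host = "10.0.0.2", port = 80, weight = 1, priority = -1 },
}
local grouped = {}
for _, node in ipairs(nodes) do
    if not grouped._priority_index then
        grouped._priority_index = {}
    end
    if not grouped[node.priority] then
        grouped[node.priority] = {}
        table.insert(grouped._priority_index, node.priority)
    end
    grouped[node.priority][node.host .. ":" .. node.port] = node.weight
end
-- grouped = {
--     [0]  = { ["10.0.0.1:80"] = 2 },
--     [-1] = { ["10.0.0.2:80"] = 1 },
--     _priority_index = { 0, -1 },
-- }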
local function fetch_health_nodes(upstream, checker)
local nodes = upstream.nodes
if not checker then
local new_nodes = core.table.new(0, #nodes)
for _, node in ipairs(nodes) do
new_nodes = transform_node(new_nodes, node)
end
return new_nodes
end
local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
local up_nodes = core.table.new(0, #nodes)
for _, node in ipairs(nodes) do
local ok, err = checker:get_target_status(node.host, port or node.port, host)
if ok then
up_nodes = transform_node(up_nodes, node)
elseif err then
core.log.warn("failed to get health check target status, addr: ",
node.host, ":", port or node.port, ", host: ", host, ", err: ", err)
end
end
if core.table.nkeys(up_nodes) == 0 then
core.log.warn("all upstream nodes is unhealthy, use default")
for _, node in ipairs(nodes) do
up_nodes = transform_node(up_nodes, node)
end
end
return up_nodes
end
local function create_server_picker(upstream, checker)
local picker = pickers[upstream.type]
if not picker then
pickers[upstream.type] = require("apisix.balancer." .. upstream.type)
picker = pickers[upstream.type]
end
if picker then
local nodes = upstream.nodes
local addr_to_domain = {}
for _, node in ipairs(nodes) do
if node.domain then
local addr = node.host .. ":" .. node.port
addr_to_domain[addr] = node.domain
end
end
local up_nodes = fetch_health_nodes(upstream, checker)
if #up_nodes._priority_index > 1 then
core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes))
local server_picker = priority_balancer.new(up_nodes, upstream, picker)
server_picker.addr_to_domain = addr_to_domain
return server_picker
end
core.log.info("upstream nodes: ",
core.json.delay_encode(up_nodes[up_nodes._priority_index[1]]))
local server_picker = picker.new(up_nodes[up_nodes._priority_index[1]], upstream)
server_picker.addr_to_domain = addr_to_domain
return server_picker
end
return nil, "invalid balancer type: " .. upstream.type, 0
end
local function parse_addr(addr)
local host, port, err = core.utils.parse_addr(addr)
return {host = host, port = port}, err
end
-- set_balancer_opts will be called in balancer phase and before any tries
local function set_balancer_opts(route, ctx)
local up_conf = ctx.upstream_conf
-- If the matched route has timeout config, prefer to use the route config.
local timeout = nil
if route and route.value and route.value.timeout then
timeout = route.value.timeout
else
if up_conf.timeout then
timeout = up_conf.timeout
end
end
if timeout then
local ok, err = set_timeouts(timeout.connect, timeout.send,
timeout.read)
if not ok then
core.log.error("could not set upstream timeouts: ", err)
end
end
local retries = up_conf.retries
if not retries or retries < 0 then
retries = #up_conf.nodes - 1
end
if retries > 0 then
if up_conf.retry_timeout and up_conf.retry_timeout > 0 then
ctx.proxy_retry_deadline = ngx_now() + up_conf.retry_timeout
end
local ok, err = set_more_tries(retries)
if not ok then
core.log.error("could not set upstream retries: ", err)
elseif err then
core.log.warn("could not set upstream retries: ", err)
end
end
end
local function parse_server_for_upstream_host(picked_server, upstream_scheme)
local standard_port = apisix_upstream.scheme_to_port[upstream_scheme]
local host = picked_server.domain or picked_server.host
if upstream_scheme and (not standard_port or standard_port ~= picked_server.port) then
host = host .. ":" .. picked_server.port
end
return host
end
-- pick_server will be called:
-- 1. in the access phase so that we can set headers according to the picked server
-- 2. each time we need to retry upstream
local function pick_server(route, ctx)
core.log.info("route: ", core.json.delay_encode(route, true))
core.log.info("ctx: ", core.json.delay_encode(ctx, true))
local up_conf = ctx.upstream_conf
for _, node in ipairs(up_conf.nodes) do
if core.utils.parse_ipv6(node.host) and str_byte(node.host, 1) ~= str_byte("[") then
node.host = '[' .. node.host .. ']'
end
end
local nodes_count = #up_conf.nodes
if nodes_count == 1 then
local node = up_conf.nodes[1]
ctx.balancer_ip = node.host
ctx.balancer_port = node.port
node.upstream_host = parse_server_for_upstream_host(node, ctx.upstream_scheme)
return node
end
local version = ctx.upstream_version
local key = ctx.upstream_key
local checker = ctx.up_checker
ctx.balancer_try_count = (ctx.balancer_try_count or 0) + 1
if ctx.balancer_try_count > 1 then
if ctx.server_picker and ctx.server_picker.after_balance then
ctx.server_picker.after_balance(ctx, true)
end
if checker then
local state, code = get_last_failure()
local host = up_conf.checks and up_conf.checks.active and up_conf.checks.active.host
local port = up_conf.checks and up_conf.checks.active and up_conf.checks.active.port
if state == "failed" then
if code == 504 then
checker:report_timeout(ctx.balancer_ip, port or ctx.balancer_port, host)
else
checker:report_tcp_failure(ctx.balancer_ip, port or ctx.balancer_port, host)
end
else
checker:report_http_status(ctx.balancer_ip, port or ctx.balancer_port, host, code)
end
end
end
if checker then
version = version .. "#" .. checker.status_ver
end
-- the same picker will be used in the whole request, especially during the retry
local server_picker = ctx.server_picker
if not server_picker then
server_picker = lrucache_server_picker(key, version,
create_server_picker, up_conf, checker)
end
if not server_picker then
return nil, "failed to fetch server picker"
end
local server, err = server_picker.get(ctx)
if not server then
err = err or "no valid upstream node"
return nil, "failed to find valid upstream server, " .. err
end
ctx.balancer_server = server
local domain = server_picker.addr_to_domain[server]
local res, err = lrucache_addr(server, nil, parse_addr, server)
if err then
core.log.error("failed to parse server addr: ", server, " err: ", err)
return core.response.exit(502)
end
res.domain = domain
ctx.balancer_ip = res.host
ctx.balancer_port = res.port
ctx.server_picker = server_picker
res.upstream_host = parse_server_for_upstream_host(res, ctx.upstream_scheme)
return res
end
-- for test
_M.pick_server = pick_server
local set_current_peer
do
local pool_opt = {}
local default_keepalive_pool
function set_current_peer(server, ctx)
local up_conf = ctx.upstream_conf
local keepalive_pool = up_conf.keepalive_pool
if enable_keepalive then
if not keepalive_pool then
if not default_keepalive_pool then
local local_conf = core.config.local_conf()
local up_keepalive_conf =
core.table.try_read_attr(local_conf, "nginx_config",
"http", "upstream")
default_keepalive_pool = {}
default_keepalive_pool.idle_timeout =
core.config_util.parse_time_unit(up_keepalive_conf.keepalive_timeout)
default_keepalive_pool.size = up_keepalive_conf.keepalive
default_keepalive_pool.requests = up_keepalive_conf.keepalive_requests
end
keepalive_pool = default_keepalive_pool
end
local idle_timeout = keepalive_pool.idle_timeout
local size = keepalive_pool.size
local requests = keepalive_pool.requests
core.table.clear(pool_opt)
pool_opt.pool_size = size
local scheme = up_conf.scheme
local pool = scheme .. "#" .. server.host .. "#" .. server.port
-- for TLS upstreams (https/grpcs) the pool must also be keyed by SNI and
-- client cert, so connections are not reused across different handshakes
if (scheme == "https" or scheme == "grpcs") then
local sni = ctx.var.upstream_host
pool = pool .. "#" .. sni
if up_conf.tls and up_conf.tls.client_cert then
pool = pool .. "#" .. up_conf.tls.client_cert
end
end
pool_opt.pool = pool
local ok, err = balancer.set_current_peer(server.host, server.port,
pool_opt)
if not ok then
return ok, err
end
return balancer.enable_keepalive(idle_timeout, requests)
end
return balancer.set_current_peer(server.host, server.port)
end
end
function _M.run(route, ctx, plugin_funcs)
local server, err
if ctx.picked_server then
-- use the server picked in the access phase
server = ctx.picked_server
ctx.picked_server = nil
set_balancer_opts(route, ctx)
else
if ctx.proxy_retry_deadline and ctx.proxy_retry_deadline < ngx_now() then
-- retry count is (try count - 1)
core.log.error("proxy retry timeout, retry count: ", (ctx.balancer_try_count or 1) - 1,
", deadline: ", ctx.proxy_retry_deadline, " now: ", ngx_now())
return core.response.exit(502)
end
-- retry
server, err = pick_server(route, ctx)
if not server then
core.log.error("failed to pick server: ", err)
return core.response.exit(502)
end
local header_changed
local pass_host = ctx.pass_host
if pass_host == "node" then
local host = server.upstream_host
if host ~= ctx.var.upstream_host then
-- retried node has a different host
ctx.var.upstream_host = host
header_changed = true
end
end
local _, run = plugin_funcs("before_proxy")
-- always recreate request as the request may be changed by plugins
if run or header_changed then
balancer.recreate_request()
end
end
core.log.info("proxy request to ", server.host, ":", server.port)
local ok, err = set_current_peer(server, ctx)
if not ok then
core.log.error("failed to set server peer [", server.host, ":",
server.port, "] err: ", err)
return core.response.exit(502)
end
ctx.proxy_passed = true
end
function _M.init_worker()
end
return _M

View File

@@ -0,0 +1,154 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local resty_chash = require("resty.chash")
local str_char = string.char
local str_gsub = string.gsub
local pairs = pairs
local CONSISTENT_POINTS = 160 -- points per server, taken from `resty.chash`
local _M = {}
local function fetch_chash_hash_key(ctx, upstream)
local key = upstream.key
local hash_on = upstream.hash_on or "vars"
local chash_key
if hash_on == "consumer" then
chash_key = ctx.consumer_name
elseif hash_on == "vars" then
chash_key = ctx.var[key]
elseif hash_on == "header" then
chash_key = ctx.var["http_" .. key]
elseif hash_on == "cookie" then
chash_key = ctx.var["cookie_" .. key]
elseif hash_on == "vars_combinations" then
local err, n_resolved
chash_key, err, n_resolved = core.utils.resolve_var(key, ctx.var)
if err then
core.log.error("could not resolve vars in ", key, " error: ", err)
end
if n_resolved == 0 then
chash_key = nil
end
end
if not chash_key then
chash_key = ctx.var["remote_addr"]
core.log.warn("chash_key fetch is nil, use default chash_key ",
"remote_addr: ", chash_key)
end
core.log.info("upstream key: ", key)
core.log.info("hash_on: ", hash_on)
core.log.info("chash_key: ", core.json.delay_encode(chash_key))
return chash_key
end
function _M.new(up_nodes, upstream)
local str_null = str_char(0)
local nodes_count = 0
local safe_limit = 0
local gcd = 0
local servers, nodes = {}, {}
for serv, weight in pairs(up_nodes) do
if gcd == 0 then
gcd = weight
else
gcd = core.math.gcd(gcd, weight)
end
end
if gcd == 0 then
-- all nodes' weights are 0
gcd = 1
end
for serv, weight in pairs(up_nodes) do
local id = str_gsub(serv, ":", str_null)
nodes_count = nodes_count + 1
weight = weight / gcd
safe_limit = safe_limit + weight
servers[id] = serv
nodes[id] = weight
end
safe_limit = safe_limit * CONSISTENT_POINTS
local picker = resty_chash:new(nodes)
return {
upstream = upstream,
get = function (ctx)
local id
if ctx.balancer_tried_servers then
if ctx.balancer_tried_servers_count == nodes_count then
return nil, "all upstream servers tried"
end
-- 'safe_limit' is a best-effort bound that prevents an infinite loop in case of a bug
for i = 1, safe_limit do
id, ctx.chash_last_server_index = picker:next(ctx.chash_last_server_index)
if not ctx.balancer_tried_servers[servers[id]] then
break
end
end
else
local chash_key = fetch_chash_hash_key(ctx, upstream)
id, ctx.chash_last_server_index = picker:find(chash_key)
end
-- core.log.warn("chash id: ", id, " val: ", servers[id])
return servers[id]
end,
after_balance = function (ctx, before_retry)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M
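-- Editor's note: a worked example of the gcd normalization above — weights
-- are divided by their greatest common divisor so resty.chash builds fewer
-- virtual points while keeping the same ratios. Sample weights are
-- hypothetical.
local function gcd(a, b)
    return b == 0 and a or gcd(b, a % b)
end
local weights = { 100, 50, 150 }
local g = 0
for _, w in ipairs(weights) do
    g = (g == 0) and w or gcd(g, w)
end
if g == 0 then
    g = 1   -- all weights are zero
end
for i, w in ipairs(weights) do
    weights[i] = w / g
end
-- weights is now { 2, 1, 3 }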

View File

@@ -0,0 +1,243 @@
-- Original Authors: Shiv Nagarajan & Scott Francis
-- Accessed: March 12, 2018
-- Inspiration drawn from:
-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421
-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
local core = require("apisix.core")
local resty_lock = require("resty.lock")
local nkeys = core.table.nkeys
local table_insert = core.table.insert
local ngx = ngx
local ngx_shared = ngx.shared
local ngx_now = ngx.now
local math = math
local pairs = pairs
local ipairs = ipairs
local next = next
local error = error
local DECAY_TIME = 10 -- this value is in seconds
local LOCK_KEY = ":ewma_key"
local shm_ewma = ngx_shared["balancer-ewma"]
local shm_last_touched_at = ngx_shared["balancer-ewma-last-touched-at"]
local lrucache_addr = core.lrucache.new({ttl = 300, count = 1024})
local lrucache_trans_format = core.lrucache.new({ttl = 300, count = 256})
local ewma_lock, ewma_lock_err = resty_lock:new("balancer-ewma-locks", {timeout = 0, exptime = 0.1})
local _M = {name = "ewma"}
local function lock(upstream)
local _, err = ewma_lock:lock(upstream .. LOCK_KEY)
if err and err ~= "timeout" then
core.log.error("EWMA Balancer failed to lock: ", err)
end
return err
end
local function unlock()
local ok, err = ewma_lock:unlock()
if not ok then
core.log.error("EWMA Balancer failed to unlock: ", err)
end
return err
end
local function decay_ewma(ewma, last_touched_at, rtt, now)
local td = now - last_touched_at
td = math.max(td, 0)
local weight = math.exp(-td / DECAY_TIME)
ewma = ewma * weight + rtt * (1.0 - weight)
return ewma
end
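-- Editor's note: worked numbers for the decay above — ten seconds after the
-- last sample the old score keeps weight e^-1 ≈ 0.368, so an EWMA of 0.200s
-- pulled toward a new RTT of 0.050s lands near 0.105s:
--   decay_ewma(0.200, 100, 0.050, 110)
--   = 0.200 * math.exp(-1) + 0.050 * (1 - math.exp(-1)) ≈ 0.105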
local function store_stats(upstream, ewma, now)
local success, err, forcible = shm_last_touched_at:set(upstream, now)
if not success then
core.log.error("shm_last_touched_at:set failed: ", err)
end
if forcible then
core.log.warn("shm_last_touched_at:set valid items forcibly overwritten")
end
success, err, forcible = shm_ewma:set(upstream, ewma)
if not success then
core.log.error("shm_ewma:set failed: ", err)
end
if forcible then
core.log.warn("shm_ewma:set valid items forcibly overwritten")
end
end
local function get_or_update_ewma(upstream, rtt, update)
if update then
local lock_err = lock(upstream)
if lock_err ~= nil then
return 0, lock_err
end
end
local ewma = shm_ewma:get(upstream) or 0
local now = ngx_now()
local last_touched_at = shm_last_touched_at:get(upstream) or 0
ewma = decay_ewma(ewma, last_touched_at, rtt, now)
if not update then
return ewma, nil
end
store_stats(upstream, ewma, now)
unlock()
return ewma, nil
end
local function get_upstream_name(upstream)
return upstream.host .. ":" .. upstream.port
end
local function score(upstream)
-- Original implementation used names
-- Endpoints don't have names, so passing in IP:Port as key instead
local upstream_name = get_upstream_name(upstream)
return get_or_update_ewma(upstream_name, 0, false)
end
local function parse_addr(addr)
local host, port, err = core.utils.parse_addr(addr)
return {host = host, port = port}, err
end
local function _trans_format(up_nodes)
-- trans
-- {"1.2.3.4:80":100,"5.6.7.8:8080":100}
-- into
-- [{"host":"1.2.3.4","port":"80"},{"host":"5.6.7.8","port":"8080"}]
local peers = {}
local res, err
for addr, _ in pairs(up_nodes) do
res, err = lrucache_addr(addr, nil, parse_addr, addr)
if not err then
core.table.insert(peers, res)
else
core.log.error("parse_addr error: ", addr, ", err: ", err)
end
end
return next(peers) and peers or nil
end
local function _ewma_find(ctx, up_nodes)
local peers
if not up_nodes or nkeys(up_nodes) == 0 then
return nil, 'up_nodes empty'
end
if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nkeys(up_nodes) then
return nil, "all upstream servers tried"
end
peers = lrucache_trans_format(up_nodes, ctx.upstream_version, _trans_format, up_nodes)
if not peers then
return nil, 'up_nodes trans error'
end
local filtered_peers
if ctx.balancer_tried_servers then
for _, peer in ipairs(peers) do
if not ctx.balancer_tried_servers[get_upstream_name(peer)] then
if not filtered_peers then
filtered_peers = {}
end
table_insert(filtered_peers, peer)
end
end
else
filtered_peers = peers
end
local endpoint = filtered_peers[1]
if #filtered_peers > 1 then
local a, b = math.random(1, #filtered_peers), math.random(1, #filtered_peers - 1)
if b >= a then
b = b + 1
end
local backendpoint
endpoint, backendpoint = filtered_peers[a], filtered_peers[b]
if score(endpoint) > score(backendpoint) then
endpoint = backendpoint
end
end
return get_upstream_name(endpoint)
end
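-- Editor's note: the two random picks above implement power-of-two-choices;
-- drawing b from 1..n-1 and shifting it past a guarantees two distinct
-- indices without a retry loop. A standalone sketch (requires n >= 2):
local function two_distinct(n)
    local a = math.random(1, n)
    local b = math.random(1, n - 1)
    if b >= a then
        b = b + 1
    end
    return a, b
end
local a, b = two_distinct(5)
assert(a ~= b)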
local function _ewma_after_balance(ctx, before_retry)
if before_retry then
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
return nil
end
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
local response_time = ctx.var.upstream_response_time or 0
local connect_time = ctx.var.upstream_connect_time or 0
local rtt = connect_time + response_time
local upstream = ctx.var.upstream_addr
if not upstream then
return nil, "no upstream addr found"
end
return get_or_update_ewma(upstream, rtt, true)
end
function _M.new(up_nodes, upstream)
if not shm_ewma or not shm_last_touched_at then
return nil, "dictionary not find"
end
if not ewma_lock then
error(ewma_lock_err)
end
return {
upstream = upstream,
get = function(ctx)
return _ewma_find(ctx, up_nodes)
end,
after_balance = _ewma_after_balance,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M

View File

@@ -0,0 +1,113 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local binaryHeap = require("binaryheap")
local ipairs = ipairs
local pairs = pairs
local _M = {}
local function least_score(a, b)
return a.score < b.score
end
function _M.new(up_nodes, upstream)
local servers_heap = binaryHeap.minUnique(least_score)
for server, weight in pairs(up_nodes) do
local score = 1 / weight
-- Note: the argument order of insert is different from others
servers_heap:insert({
server = server,
effect_weight = 1 / weight,
score = score,
}, server)
end
return {
upstream = upstream,
get = function (ctx)
local server, info, err
if ctx.balancer_tried_servers then
local tried_server_list = {}
while true do
server, info = servers_heap:peek()
-- we need to let the retry > #nodes so this branch can be hit and
-- the request will retry next priority of nodes
if server == nil then
err = "all upstream servers tried"
break
end
if not ctx.balancer_tried_servers[server] then
break
end
servers_heap:pop()
core.table.insert(tried_server_list, info)
end
for _, info in ipairs(tried_server_list) do
servers_heap:insert(info, info.server)
end
else
server, info = servers_heap:peek()
end
if not server then
return nil, err
end
info.score = info.score + info.effect_weight
servers_heap:update(server, info)
return server
end,
after_balance = function (ctx, before_retry)
local server = ctx.balancer_server
local info = servers_heap:valueByPayload(server)
info.score = info.score - info.effect_weight
servers_heap:update(server, info)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[server] = true
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
end,
}
end
return _M
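-- Editor's note: a self-contained sketch of the scoring rule above — each
-- server starts at 1/weight and gains 1/weight per pick, so a weight-2 node
-- absorbs twice as many concurrent picks; the real module keeps these
-- entries in a binary min-heap. Sample servers are hypothetical.
local servers = {
    ["10.0.0.1:80"] = { effect_weight = 1 / 2, score = 1 / 2 },
    ["10.0.0.2:80"] = { effect_weight = 1 / 1, score = 1 / 1 },
}
local order = { "10.0.0.1:80", "10.0.0.2:80" }
local function pick()
    local best
    for _, name in ipairs(order) do
        if not best or servers[name].score < servers[best].score then
            best = name
        end
    end
    local info = servers[best]
    info.score = info.score + info.effect_weight   -- one more in-flight pick
    return best
end
print(pick(), pick(), pick())   --> 10.0.0.1:80   10.0.0.1:80   10.0.0.2:80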

View File

@@ -0,0 +1,81 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local ipairs = ipairs
local _M = {}
local function max_priority(a, b)
return a > b
end
function _M.new(up_nodes, upstream, picker_mod)
local priority_index = up_nodes._priority_index
core.table.sort(priority_index, max_priority)
local pickers = core.table.new(#priority_index, 0)
for i, priority in ipairs(priority_index) do
local picker, err = picker_mod.new(up_nodes[priority], upstream)
if not picker then
return nil, "failed to create picker with priority " .. priority .. ": " .. err
end
if not picker.before_retry_next_priority then
return nil, "picker should define 'before_retry_next_priority' to reset ctx"
end
pickers[i] = picker
end
return {
upstream = upstream,
get = function (ctx)
for i = ctx.priority_balancer_picker_idx or 1, #pickers do
local picker = pickers[i]
local server, err = picker.get(ctx)
if server then
ctx.priority_balancer_picker_idx = i
return server
end
core.log.notice("failed to get server from current priority ",
priority_index[i],
", try next one, err: ", err)
picker.before_retry_next_priority(ctx)
end
return nil, "all servers tried"
end,
after_balance = function (ctx, before_retry)
local priority_balancer_picker = pickers[ctx.priority_balancer_picker_idx]
if not priority_balancer_picker or
not priority_balancer_picker.after_balance
then
return
end
priority_balancer_picker.after_balance(ctx, before_retry)
end
}
end
return _M
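-- Editor's note: a minimal sketch of the fallback walk above — pickers are
-- ordered by descending priority and get() resumes from the last index that
-- produced a server; both stub pickers below are hypothetical.
local pickers = {
    function() return nil, "no healthy node" end,   -- priority 0 exhausted
    function() return "10.0.0.9:80" end,            -- priority -1 fallback
}
local function get(ctx)
    for i = ctx.idx or 1, #pickers do
        local server, err = pickers[i]()
        if server then
            ctx.idx = i
            return server
        end
    end
    return nil, "all servers tried"
end
print(get({}))   --> 10.0.0.9:80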

View File

@@ -0,0 +1,89 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local roundrobin = require("resty.roundrobin")
local core = require("apisix.core")
local nkeys = core.table.nkeys
local pairs = pairs
local _M = {}
function _M.new(up_nodes, upstream)
local safe_limit = 0
for _, weight in pairs(up_nodes) do
-- the weight can be zero
safe_limit = safe_limit + weight + 1
end
local picker = roundrobin:new(up_nodes)
local nodes_count = nkeys(up_nodes)
return {
upstream = upstream,
get = function (ctx)
if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nodes_count then
return nil, "all upstream servers tried"
end
local server, err
for i = 1, safe_limit do
server, err = picker:find()
if not server then
return nil, err
end
if ctx.balancer_tried_servers then
if not ctx.balancer_tried_servers[server] then
break
end
else
break
end
end
return server
end,
after_balance = function (ctx, before_retry)
if not before_retry then
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
return nil
end
if not ctx.balancer_tried_servers then
ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
end
ctx.balancer_tried_servers[ctx.balancer_server] = true
ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
end,
before_retry_next_priority = function (ctx)
if ctx.balancer_tried_servers then
core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
ctx.balancer_tried_servers = nil
end
ctx.balancer_tried_servers_count = 0
end,
}
end
return _M

View File

@@ -0,0 +1,40 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local pkg_cpath_org = package.cpath
local pkg_path_org = package.path
local _, find_pos_end = string.find(pkg_path_org, ";", -1, true)
if not find_pos_end then
pkg_path_org = pkg_path_org .. ";"
end
local apisix_home = "/usr/local/apisix"
local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;"
local pkg_path_deps = apisix_home .. "/deps/share/lua/5.1/?.lua;"
local pkg_path_env = apisix_home .. "/?.lua;"
-- modify the load path to load our dependencies
package.cpath = pkg_cpath .. pkg_cpath_org
package.path = pkg_path_deps .. pkg_path_org .. pkg_path_env
-- pass path to construct the final result
local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org)
local ops = require("apisix.cli.ops")
ops.execute(env, arg)

View File

@@ -0,0 +1,385 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local table_concat = table.concat
local _M = {
apisix = {
node_listen = { 9080 },
enable_admin = true,
enable_dev_mode = false,
enable_reuseport = true,
show_upstream_status_in_response_header = false,
enable_ipv6 = true,
enable_http2 = true,
enable_server_tokens = true,
extra_lua_path = "",
extra_lua_cpath = "",
proxy_cache = {
cache_ttl = "10s",
zones = {
{
name = "disk_cache_one",
memory_size = "50m",
disk_size = "1G",
disk_path = "/tmp/disk_cache_one",
cache_levels = "1:2"
},
{
name = "memory_cache",
memory_size = "50m"
}
}
},
delete_uri_tail_slash = false,
normalize_uri_like_servlet = false,
router = {
http = "radixtree_host_uri",
ssl = "radixtree_sni"
},
proxy_mode = "http",
resolver_timeout = 5,
enable_resolv_search_opt = true,
ssl = {
enable = true,
listen = { {
port = 9443,
enable_http3 = false
} },
ssl_protocols = "TLSv1.2 TLSv1.3",
            ssl_ciphers = table_concat({
"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305",
"DHE-RSA-AES128-GCM-SHA256", "DHE-RSA-AES256-GCM-SHA384",
}, ":"),
ssl_session_tickets = false,
ssl_trusted_certificate = "system"
},
enable_control = true,
disable_sync_configuration_during_start = false,
data_encryption = {
enable_encrypt_fields = true,
keyring = { "qeddd145sfvddff3", "edd1c9f0985e76a2" }
},
events = {
module = "lua-resty-events"
}
},
nginx_config = {
error_log = "logs/error.log",
error_log_level = "warn",
worker_processes = "auto",
enable_cpu_affinity = false,
worker_rlimit_nofile = 20480,
worker_shutdown_timeout = "240s",
max_pending_timers = 16384,
max_running_timers = 4096,
event = {
worker_connections = 10620
},
meta = {
lua_shared_dict = {
["prometheus-metrics"] = "15m",
["standalone-config"] = "10m",
["status-report"] = "1m",
}
},
stream = {
enable_access_log = false,
access_log = "logs/access_stream.log",
-- luacheck: push max code line length 300
access_log_format = "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time",
-- luacheck: pop
access_log_format_escape = "default",
lua_shared_dict = {
["etcd-cluster-health-check-stream"] = "10m",
["lrucache-lock-stream"] = "10m",
["plugin-limit-conn-stream"] = "10m",
["worker-events-stream"] = "10m",
["tars-stream"] = "1m",
["upstream-healthcheck-stream"] = "10m",
}
},
main_configuration_snippet = "",
http_configuration_snippet = "",
http_server_configuration_snippet = "",
http_server_location_configuration_snippet = "",
http_admin_configuration_snippet = "",
http_end_configuration_snippet = "",
stream_configuration_snippet = "",
http = {
enable_access_log = true,
access_log = "logs/access.log",
access_log_buffer = 16384,
-- luacheck: push max code line length 300
access_log_format =
'$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time "$upstream_scheme://$upstream_host$upstream_uri"',
-- luacheck: pop
access_log_format_escape = "default",
keepalive_timeout = "60s",
client_header_timeout = "60s",
client_body_timeout = "60s",
client_max_body_size = 0,
send_timeout = "10s",
underscores_in_headers = "on",
real_ip_header = "X-Real-IP",
real_ip_recursive = "off",
real_ip_from = { "127.0.0.1", "unix:" },
proxy_ssl_server_name = true,
upstream = {
keepalive = 320,
keepalive_requests = 1000,
keepalive_timeout = "60s"
},
charset = "utf-8",
variables_hash_max_size = 2048,
lua_shared_dict = {
["internal-status"] = "10m",
["plugin-limit-req"] = "10m",
["plugin-limit-count"] = "10m",
["prometheus-metrics"] = "10m",
["plugin-limit-conn"] = "10m",
["upstream-healthcheck"] = "10m",
["worker-events"] = "10m",
["lrucache-lock"] = "10m",
["balancer-ewma"] = "10m",
["balancer-ewma-locks"] = "10m",
["balancer-ewma-last-touched-at"] = "10m",
["plugin-limit-req-redis-cluster-slot-lock"] = "1m",
["plugin-limit-count-redis-cluster-slot-lock"] = "1m",
["plugin-limit-conn-redis-cluster-slot-lock"] = "1m",
["plugin-ai-rate-limiting"] = "10m",
["plugin-ai-rate-limiting-reset-header"] = "10m",
tracing_buffer = "10m",
["plugin-api-breaker"] = "10m",
["etcd-cluster-health-check"] = "10m",
discovery = "1m",
jwks = "1m",
introspection = "10m",
["access-tokens"] = "1m",
["ext-plugin"] = "1m",
tars = "1m",
["cas-auth"] = "10m",
["ocsp-stapling"] = "10m",
["mcp-session"] = "10m",
}
}
},
graphql = {
max_size = 1048576
},
plugins = {
"real-ip",
"ai",
"client-control",
"proxy-control",
"request-id",
"zipkin",
"ext-plugin-pre-req",
"fault-injection",
"mocking",
"serverless-pre-function",
"cors",
"ip-restriction",
"ua-restriction",
"referer-restriction",
"csrf",
"uri-blocker",
"request-validation",
"chaitin-waf",
"multi-auth",
"openid-connect",
"cas-auth",
"authz-casbin",
"authz-casdoor",
"wolf-rbac",
"ldap-auth",
"hmac-auth",
"basic-auth",
"jwt-auth",
"jwe-decrypt",
"key-auth",
"consumer-restriction",
"attach-consumer-label",
"forward-auth",
"opa",
"authz-keycloak",
"proxy-cache",
"body-transformer",
"ai-prompt-template",
"ai-prompt-decorator",
"ai-prompt-guard",
"ai-rag",
"ai-rate-limiting",
"ai-proxy-multi",
"ai-proxy",
"ai-aws-content-moderation",
"proxy-mirror",
"proxy-rewrite",
"workflow",
"api-breaker",
"limit-conn",
"limit-count",
"limit-req",
"gzip",
-- deprecated and will be removed in a future release
-- "server-info",
"traffic-split",
"redirect",
"response-rewrite",
"mcp-bridge",
"degraphql",
"kafka-proxy",
"grpc-transcode",
"grpc-web",
"http-dubbo",
"public-api",
"prometheus",
"datadog",
"lago",
"loki-logger",
"elasticsearch-logger",
"echo",
"loggly",
"http-logger",
"splunk-hec-logging",
"skywalking-logger",
"google-cloud-logging",
"sls-logger",
"tcp-logger",
"kafka-logger",
"rocketmq-logger",
"syslog",
"udp-logger",
"file-logger",
"clickhouse-logger",
"tencent-cloud-cls",
"inspect",
"example-plugin",
"aws-lambda",
"azure-functions",
"openwhisk",
"openfunction",
"serverless-post-function",
"ext-plugin-post-req",
"ext-plugin-post-resp",
"ai-request-rewrite",
},
stream_plugins = { "ip-restriction", "limit-conn", "mqtt-proxy", "syslog" },
plugin_attr = {
["log-rotate"] = {
timeout = 10000,
interval = 3600,
max_kept = 168,
max_size = -1,
enable_compression = false
},
skywalking = {
service_name = "APISIX",
service_instance_name = "APISIX Instance Name",
endpoint_addr = "http://127.0.0.1:12800",
report_interval = 3
},
opentelemetry = {
trace_id_source = "x-request-id",
resource = {
["service.name"] = "APISIX"
},
collector = {
address = "127.0.0.1:4318",
request_timeout = 3,
request_headers = {
Authorization = "token"
}
},
batch_span_processor = {
drop_on_queue_full = false,
max_queue_size = 1024,
batch_timeout = 2,
inactive_timeout = 1,
max_export_batch_size = tonumber(os.getenv("OTEL_BSP_MAX_EXPORT_BATCH_SIZE")) or 16
},
set_ngx_var = false
},
prometheus = {
export_uri = "/apisix/prometheus/metrics",
metric_prefix = "apisix_",
enable_export_server = true,
export_addr = {
ip = "127.0.0.1",
port = 9091
}
},
["server-info"] = {
report_ttl = 60
},
["dubbo-proxy"] = {
upstream_multiplex_count = 32
},
["proxy-mirror"] = {
timeout = {
connect = "60s",
read = "60s",
send = "60s"
}
},
inspect = {
delay = 3,
hooks_file = "/usr/local/apisix/plugin_inspect_hooks.lua"
},
zipkin = {
set_ngx_var = false
}
},
deployment = {
role = "traditional",
role_traditional = {
config_provider = "etcd"
},
admin = {
admin_key_required = true,
admin_key = {
{
name = "admin",
key = "",
role = "admin"
}
},
enable_admin_cors = true,
enable_admin_ui = true,
allow_admin = { "127.0.0.0/24" },
admin_listen = {
ip = "0.0.0.0",
port = 9180
},
admin_api_version = "v3"
},
etcd = {
host = { "http://127.0.0.1:2379" },
prefix = "/apisix",
timeout = 30,
watch_timeout = 50,
startup_retry = 2,
tls = {
verify = true
}
}
}
}
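-- The table above only provides defaults: values from the user's
-- conf/config.yaml are merged over it by merge_conf() in apisix.cli.file.
-- For illustration (assumed user file), a config.yaml containing
--   apisix:
--     node_listen:
--       - 8080
-- would replace the default node_listen = { 9080 } above.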
return _M

View File

@@ -0,0 +1,115 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local util = require("apisix.cli.util")
local pcall = pcall
local error = error
local exit = os.exit
local stderr = io.stderr
local str_find = string.find
local arg = arg
local package = package
local tonumber = tonumber
return function (apisix_home, pkg_cpath_org, pkg_path_org)
-- ulimit setting should be checked when APISIX starts
local res, err = util.execute_cmd("ulimit -n")
if not res then
error("failed to exec ulimit cmd \'ulimit -n \', err: " .. err)
end
    local trimmed_res = util.trim(res)
    local ulimit = trimmed_res == "unlimited" and trimmed_res or tonumber(trimmed_res)
if not ulimit then
error("failed to fetch current maximum number of open file descriptors")
end
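    -- illustrative values: `ulimit -n` printing "1024\n" yields ulimit = 1024,
    -- while "unlimited" is kept as a string for later checks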
    -- developer mode only: use the current folder as the working directory
local is_root_path = false
local script_path = arg[0]
if script_path:sub(1, 2) == './' then
apisix_home = util.trim(util.execute_cmd("pwd"))
if not apisix_home then
error("failed to fetch current path")
end
        -- determine whether the current path is under the "/root" folder;
        -- a leading "/root/" marks the root user's home directory
if str_find(apisix_home .. "/", '/root/', nil, true) == 1 then
is_root_path = true
end
local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;"
.. apisix_home .. "/deps/lib/lua/5.1/?.so;"
local pkg_path = apisix_home .. "/?/init.lua;"
.. apisix_home .. "/deps/share/lua/5.1/?/init.lua;"
.. apisix_home .. "/deps/share/lua/5.1/?.lua;;"
package.cpath = pkg_cpath .. package.cpath
package.path = pkg_path .. package.path
end
do
        -- table.new is built into LuaJIT; if it cannot be loaded we are on
        -- plain Lua, so check for a conflicting plain-Lua cjson install
local ok = pcall(require, "table.new")
if not ok then
local ok, json = pcall(require, "cjson")
if ok and json then
stderr:write("please remove the cjson library in Lua, it may "
.. "conflict with the cjson library in openresty. "
.. "\n luarocks remove lua-cjson\n")
exit(1)
end
end
end
    -- resolve the absolute path of the openresty binary
res, err = util.execute_cmd("command -v openresty")
if not res then
error("failed to exec cmd \'command -v openresty\', err: " .. err)
end
local openresty_path_abs = util.trim(res)
local openresty_args = openresty_path_abs .. [[ -p ]] .. apisix_home .. [[ -c ]]
.. apisix_home .. [[/conf/nginx.conf]]
local or_info, err = util.execute_cmd("openresty -V 2>&1")
if not or_info then
error("failed to exec cmd \'openresty -V 2>&1\', err: " .. err)
end
local use_apisix_base = true
if not or_info:find("apisix-nginx-module", 1, true) then
use_apisix_base = false
end
local min_etcd_version = "3.4.0"
return {
apisix_home = apisix_home,
is_root_path = is_root_path,
openresty_args = openresty_args,
openresty_info = or_info,
use_apisix_base = use_apisix_base,
pkg_cpath_org = pkg_cpath_org,
pkg_path_org = pkg_path_org,
min_etcd_version = min_etcd_version,
ulimit = ulimit,
}
end

View File

@@ -0,0 +1,405 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local base64_encode = require("base64").encode
local dkjson = require("dkjson")
local constants = require("apisix.constants")
local util = require("apisix.cli.util")
local file = require("apisix.cli.file")
local http = require("socket.http")
local https = require("ssl.https")
local ltn12 = require("ltn12")
local type = type
local ipairs = ipairs
local pairs = pairs
local print = print
local tonumber = tonumber
local str_format = string.format
local str_sub = string.sub
local table_concat = table.concat
local table_insert = table.insert
local io_stderr = io.stderr
local _M = {}
-- Timeout for all I/O operations
http.TIMEOUT = 3
local function parse_semantic_version(ver)
local errmsg = "invalid semantic version: " .. ver
local parts = util.split(ver, "-")
if #parts > 2 then
return nil, errmsg
end
if #parts == 2 then
ver = parts[1]
end
local fields = util.split(ver, ".")
if #fields ~= 3 then
return nil, errmsg
end
local major = tonumber(fields[1])
local minor = tonumber(fields[2])
local patch = tonumber(fields[3])
if not (major and minor and patch) then
return nil, errmsg
end
return {
major = major,
minor = minor,
patch = patch,
}
end
local function compare_semantic_version(v1, v2)
local ver1, err = parse_semantic_version(v1)
if not ver1 then
return nil, err
end
local ver2, err = parse_semantic_version(v2)
if not ver2 then
return nil, err
end
if ver1.major ~= ver2.major then
return ver1.major < ver2.major
end
if ver1.minor ~= ver2.minor then
return ver1.minor < ver2.minor
end
return ver1.patch < ver2.patch
end
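-- Usage sketch (illustrative version strings):
--   compare_semantic_version("3.3.9", "3.4.0") --> true   (3.3.9 < 3.4.0)
--   compare_semantic_version("3.5.0", "3.4.0") --> false
--   compare_semantic_version("3.5", "3.4.0")   --> nil, "invalid semantic version: 3.5"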
local function request(url, yaml_conf)
local response_body = {}
local single_request = false
if type(url) == "string" then
url = {
url = url,
method = "GET",
sink = ltn12.sink.table(response_body),
}
single_request = true
end
local res, code
if str_sub(url.url, 1, 8) == "https://" then
local verify = "peer"
if yaml_conf.etcd.tls then
local cfg = yaml_conf.etcd.tls
if cfg.verify == false then
verify = "none"
end
url.certificate = cfg.cert
url.key = cfg.key
local apisix_ssl = yaml_conf.apisix.ssl
if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
url.cafile = apisix_ssl.ssl_trusted_certificate
end
end
url.verify = verify
res, code = https.request(url)
else
res, code = http.request(url)
end
-- In case of failure, request returns nil followed by an error message.
-- Else the first return value is the response body
-- and followed by the response status code.
if single_request and res ~= nil then
return table_concat(response_body), code
end
return res, code
end
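-- Usage sketch: the string form issues a plain GET and returns the body,
--   local body, code = request("http://127.0.0.1:2379/version", yaml_conf)
-- while the table form (method/source/sink preset by the caller) returns 1
-- and the status code, the body having been accumulated in the sink table.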
local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
local is_success = true
local errmsg
local auth_token
local user = yaml_conf.etcd.user
local password = yaml_conf.etcd.password
if user and password then
local auth_url = host .. "/v3/auth/authenticate"
local json_auth = {
name = user,
password = password
}
local post_json_auth = dkjson.encode(json_auth)
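        -- the encoded payload looks like {"name":"<user>","password":"<pwd>"};
        -- etcd's v3 auth gateway replies with a JSON body carrying a "token" field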
local response_body = {}
local res, err
local retry_time = 0
while retry_time < 2 do
res, err = request({
url = auth_url,
method = "POST",
source = ltn12.source.string(post_json_auth),
sink = ltn12.sink.table(response_body),
headers = {
["Content-Length"] = #post_json_auth
}
}, yaml_conf)
-- In case of failure, request returns nil followed by an error message.
-- Else the first return value is just the number 1
-- and followed by the response status code.
if res then
break
end
retry_time = retry_time + 1
            print(str_format("Warning! Request to etcd endpoint \'%s\' failed: %s, retry count=%s",
                             auth_url, err, retry_time))
end
if not res then
errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", auth_url, err)
util.die(errmsg)
end
local res_auth = table_concat(response_body)
local body_auth, _, err_auth = dkjson.decode(res_auth)
if err_auth or (body_auth and not body_auth["token"]) then
errmsg = str_format("got malformed auth message: \"%s\" from etcd \"%s\"\n",
res_auth, auth_url)
util.die(errmsg)
end
auth_token = body_auth.token
end
local dirs = {}
for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
dirs[name] = true
end
for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
dirs[name] = true
end
for dir_name in pairs(dirs) do
local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
local put_url = host .. "/v3/kv/put"
local post_json = '{"value":"' .. base64_encode("init_dir")
.. '", "key":"' .. base64_encode(key) .. '"}'
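        -- for illustration, with prefix "/apisix" and dir "/routes" this yields
        -- {"value":"aW5pdF9kaXI=", "key":"L2FwaXNpeC9yb3V0ZXMv"}, i.e. the
        -- base64 of "init_dir" and "/apisix/routes/" expected by the etcd v3 KV API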
local response_body = {}
local headers = {["Content-Length"] = #post_json}
if auth_token then
headers["Authorization"] = auth_token
end
local res, err
local retry_time = 0
while retry_time < 2 do
res, err = request({
url = put_url,
method = "POST",
source = ltn12.source.string(post_json),
sink = ltn12.sink.table(response_body),
headers = headers
}, yaml_conf)
retry_time = retry_time + 1
if res then
break
end
            print(str_format("Warning! Request to etcd endpoint \'%s\' failed: %s, retry count=%s",
                             put_url, err, retry_time))
end
if not res then
errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", put_url, err)
util.die(errmsg)
end
local res_put = table_concat(response_body)
if res_put:find("404 page not found", 1, true) then
            errmsg = str_format("gRPC gateway is not enabled in etcd cluster \"%s\", "
                                .. "which is required by Apache APISIX\n", host)
util.die(errmsg)
end
if res_put:find("CommonName of client sending a request against gateway", 1, true) then
errmsg = str_format("etcd \"client-cert-auth\" cannot be used with gRPC-gateway, "
.. "please configure the etcd username and password "
.. "in configuration file\n")
util.die(errmsg)
end
if res_put:find("error", 1, true) then
is_success = false
if (index == host_count) then
errmsg = str_format("got malformed key-put message: \"%s\" from etcd \"%s\"\n",
res_put, put_url)
util.die(errmsg)
end
break
end
if args and args["verbose"] then
print(res_put)
end
end
return is_success
end
local function prepare_dirs(yaml_conf, args, index, host, host_count)
return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
function _M.init(env, args)
-- read_yaml_conf
local yaml_conf, err = file.read_yaml_conf(env.apisix_home)
if not yaml_conf then
util.die("failed to read local yaml config of apisix: ", err)
end
if not yaml_conf.apisix then
util.die("failed to read `apisix` field from yaml file when init etcd")
end
if yaml_conf.deployment.config_provider ~= "etcd" then
return true
end
if not yaml_conf.etcd then
util.die("failed to read `etcd` field from yaml file when init etcd")
end
-- convert old single etcd config to multiple etcd config
if type(yaml_conf.etcd.host) == "string" then
yaml_conf.etcd.host = {yaml_conf.etcd.host}
end
local host_count = #(yaml_conf.etcd.host)
local scheme
for i = 1, host_count do
local host = yaml_conf.etcd.host[i]
local fields = util.split(host, "://")
if not fields then
util.die("malformed etcd endpoint: ", host, "\n")
end
if not scheme then
scheme = fields[1]
elseif scheme ~= fields[1] then
print([[WARNING: mixed protocols among etcd endpoints]])
end
end
-- check the etcd cluster version
local etcd_healthy_hosts = {}
for index, host in ipairs(yaml_conf.etcd.host) do
local version_url = host .. "/version"
local errmsg
local res, err
local retry_time = 0
local etcd = yaml_conf.etcd
local max_retry = tonumber(etcd.startup_retry) or 2
while retry_time < max_retry do
res, err = request(version_url, yaml_conf)
-- In case of failure, request returns nil followed by an error message.
-- Else the first return value is the response body
-- and followed by the response status code.
if res then
break
end
retry_time = retry_time + 1
            print(str_format("Warning! Request to etcd endpoint \'%s\' failed: %s, retry count=%s",
                             version_url, err, retry_time))
end
if res then
local body, _, err = dkjson.decode(res)
if err or (body and not body["etcdcluster"]) then
errmsg = str_format("got malformed version message: \"%s\" from etcd \"%s\"\n", res,
version_url)
util.die(errmsg)
end
local cluster_version = body["etcdcluster"]
if compare_semantic_version(cluster_version, env.min_etcd_version) then
util.die("etcd cluster version ", cluster_version,
" is less than the required version ", env.min_etcd_version,
", please upgrade your etcd cluster\n")
end
table_insert(etcd_healthy_hosts, host)
else
io_stderr:write(str_format("request etcd endpoint \'%s\' error, %s\n", version_url,
err))
end
end
if #etcd_healthy_hosts <= 0 then
util.die("all etcd nodes are unavailable\n")
end
if (#etcd_healthy_hosts / host_count * 100) <= 50 then
        util.die("the etcd cluster needs more than 50% of its nodes to be healthy\n")
end
-- access from the data plane to etcd should be read-only.
-- data plane writes to etcd may cause security issues.
if yaml_conf.deployment.role == "data_plane" then
print("access from the data plane to etcd should be read-only, "
.."skip initializing the data of etcd")
return true
end
print("trying to initialize the data of etcd")
local etcd_ok = false
for index, host in ipairs(etcd_healthy_hosts) do
if prepare_dirs(yaml_conf, args, index, host, host_count) then
etcd_ok = true
break
end
end
if not etcd_ok then
util.die("none of the configured etcd works well\n")
end
end
return _M

View File

@@ -0,0 +1,343 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local yaml = require("lyaml")
local profile = require("apisix.core.profile")
local util = require("apisix.cli.util")
local schema = require("apisix.cli.schema")
local default_conf = require("apisix.cli.config")
local dkjson = require("dkjson")
local pl_path = require("pl.path")
local pairs = pairs
local type = type
local tonumber = tonumber
local getenv = os.getenv
local str_gmatch = string.gmatch
local str_find = string.find
local str_sub = string.sub
local print = print
local _M = {}
local exported_vars
function _M.get_exported_vars()
return exported_vars
end
local function is_empty_yaml_line(line)
return line == '' or str_find(line, '^%s*$') or str_find(line, '^%s*#')
end
local function tab_is_array(t)
local count = 0
    for _ in pairs(t) do
count = count + 1
end
return #t == count
end
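-- behaviour sketch: tab_is_array({ "a", "b" }) --> true,
-- tab_is_array({ k = 1 }) --> false (#t is 0 but the pair count is 1)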
local function var_sub(val)
local err
local var_used = false
-- we use '${{var}}' because '$var' and '${var}' are taken
-- by Nginx
local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
local i, j = var:find("%:%=")
local default
if i and j then
default = var:sub(i + 2, #var)
default = default:gsub('^%s*(.-)%s*$', '%1')
var = var:sub(1, i - 1)
end
local v = getenv(var) or default
if v then
if not exported_vars then
exported_vars = {}
end
exported_vars[var] = v
var_used = true
return v
end
err = "failed to handle configuration: " ..
"can't find environment variable " .. var
return ""
end)
return new_val, var_used, err
end
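-- Substitution sketch (assumed environment, HOSTNAME=gw-1):
--   var_sub("id-${{HOSTNAME}}")       --> "id-gw-1", true
--   var_sub("${{MISSING:=fallback}}") --> "fallback", true
-- an unset variable without a default yields "" plus an error message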
local function resolve_conf_var(conf)
local new_keys = {}
for key, val in pairs(conf) do
-- avoid re-iterating the table for already iterated key
if new_keys[key] then
goto continue
end
-- substitute environment variables from conf keys
if type(key) == "string" then
local new_key, _, err = var_sub(key)
if err then
return nil, err
end
if new_key ~= key then
new_keys[new_key] = "dummy" -- we only care about checking the key
                conf[key] = nil
conf[new_key] = val
key = new_key
end
end
if type(val) == "table" then
local ok, err = resolve_conf_var(val)
if not ok then
return nil, err
end
elseif type(val) == "string" then
local new_val, var_used, err = var_sub(val)
if err then
return nil, err
end
if var_used then
if tonumber(new_val) ~= nil then
new_val = tonumber(new_val)
elseif new_val == "true" then
new_val = true
elseif new_val == "false" then
new_val = false
end
end
conf[key] = new_val
end
::continue::
end
return true
end
_M.resolve_conf_var = resolve_conf_var
local function replace_by_reserved_env_vars(conf)
-- TODO: support more reserved environment variables
local v = getenv("APISIX_DEPLOYMENT_ETCD_HOST")
if v and conf["deployment"] and conf["deployment"]["etcd"] then
local val, _, err = dkjson.decode(v)
if err or not val then
print("parse ${APISIX_DEPLOYMENT_ETCD_HOST} failed, error:", err)
return
end
conf["deployment"]["etcd"]["host"] = val
end
end
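-- illustrative environment value (a JSON array of endpoints):
--   APISIX_DEPLOYMENT_ETCD_HOST='["http://etcd-1:2379","http://etcd-2:2379"]'
-- which replaces conf.deployment.etcd.host after decoding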
local function path_is_multi_type(path, type_val)
if str_sub(path, 1, 14) == "nginx_config->" and
(type_val == "number" or type_val == "string") then
return true
end
if path == "apisix->node_listen" and type_val == "number" then
return true
end
if path == "apisix->data_encryption->keyring" then
return true
end
return false
end
local function merge_conf(base, new_tab, ppath)
ppath = ppath or ""
for key, val in pairs(new_tab) do
if type(val) == "table" then
if val == yaml.null then
base[key] = nil
elseif tab_is_array(val) then
base[key] = val
else
if base[key] == nil then
base[key] = {}
end
local ok, err = merge_conf(
base[key],
val,
ppath == "" and key or ppath .. "->" .. key
)
if not ok then
return nil, err
end
end
else
local type_val = type(val)
if base[key] == nil then
base[key] = val
elseif type(base[key]) ~= type_val then
local path = ppath == "" and key or ppath .. "->" .. key
if path_is_multi_type(path, type_val) then
base[key] = val
else
return nil, "failed to merge, path[" .. path .. "] expect: " ..
type(base[key]) .. ", but got: " .. type_val
end
else
base[key] = val
end
end
end
return base
end
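-- Merge sketch (illustrative): with base = { a = 1, b = { c = 2 } },
--   merge_conf(base, { b = { d = 3 } }) --> { a = 1, b = { c = 2, d = 3 } }
-- arrays replace the base value wholesale, and yaml.null deletes the key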
function _M.read_yaml_conf(apisix_home)
if apisix_home then
profile.apisix_home = apisix_home .. "/"
end
local local_conf_path = profile:customized_yaml_path()
if not local_conf_path then
local_conf_path = profile:yaml_path("config")
end
local user_conf_yaml, err = util.read_file(local_conf_path)
if not user_conf_yaml then
return nil, err
end
local is_empty_file = true
for line in str_gmatch(user_conf_yaml .. '\n', '(.-)\r?\n') do
if not is_empty_yaml_line(line) then
is_empty_file = false
break
end
end
if not is_empty_file then
local user_conf = yaml.load(user_conf_yaml)
if not user_conf then
return nil, "invalid config.yaml file"
end
local ok, err = resolve_conf_var(user_conf)
if not ok then
return nil, err
end
ok, err = merge_conf(default_conf, user_conf)
if not ok then
return nil, err
end
end
-- fill the default value by the schema
local ok, err = schema.validate(default_conf)
if not ok then
return nil, err
end
if default_conf.deployment then
default_conf.deployment.config_provider = "etcd"
if default_conf.deployment.role == "traditional" then
default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_traditional.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
end
elseif default_conf.deployment.role == "control_plane" then
default_conf.etcd = default_conf.deployment.etcd
default_conf.apisix.enable_admin = true
elseif default_conf.deployment.role == "data_plane" then
default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_data_plane.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
elseif default_conf.deployment.role_data_plane.config_provider == "json" then
default_conf.deployment.config_provider = "json"
elseif default_conf.deployment.role_data_plane.config_provider == "xds" then
default_conf.deployment.config_provider = "xds"
end
default_conf.apisix.enable_admin = false
end
end
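    -- role summary (for reference): "traditional" serves the Admin API and
    -- proxy from one instance; "control_plane" forces enable_admin = true;
    -- "data_plane" disables the Admin API and may read its configuration
    -- from yaml/json/xds instead of etcd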
    --- use `not ngx` to check whether we are running in the APISIX CLI,
    --- because `apisix.yaml` only needs to be parsed and validated there.
if default_conf.deployment.config_provider == "yaml" and not ngx then
local apisix_conf_path = profile:yaml_path("apisix")
local apisix_conf_yaml, _ = util.read_file(apisix_conf_path)
if apisix_conf_yaml then
local apisix_conf = yaml.load(apisix_conf_yaml)
if apisix_conf then
local ok, err = resolve_conf_var(apisix_conf)
if not ok then
return nil, err
end
end
end
end
local apisix_ssl = default_conf.apisix.ssl
if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
-- default value is set to "system" during schema validation
if apisix_ssl.ssl_trusted_certificate == "system" then
local trusted_certs_path, err = util.get_system_trusted_certs_filepath()
if not trusted_certs_path then
util.die(err)
end
apisix_ssl.ssl_trusted_certificate = trusted_certs_path
else
-- During validation, the path is relative to PWD
-- When Nginx starts, the path is relative to conf
-- Therefore we need to check the absolute version instead
local cert_path = pl_path.abspath(apisix_ssl.ssl_trusted_certificate)
if not pl_path.exists(cert_path) then
                util.die("certificate path ", cert_path, " doesn't exist\n")
end
apisix_ssl.ssl_trusted_certificate = cert_path
end
end
replace_by_reserved_env_vars(default_conf)
return default_conf
end
return _M

View File

@@ -0,0 +1,66 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- IP match and verify module.
--
-- @module cli.ip
local mediador_ip = require("resty.mediador.ip")
local setmetatable = setmetatable
local _M = {}
local mt = { __index = _M }
---
-- Create an instance of module cli.ip.
--
-- @function cli.ip:new
-- @tparam string ip IP or CIDR.
-- @treturn table An instance of the module if the given IP is valid; nil and an error message otherwise.
function _M.new(self, ip)
if not mediador_ip.valid(ip) then
return nil, "invalid ip"
end
local _ip = mediador_ip.parse(ip)
return setmetatable({ _ip = _ip }, mt)
end
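-- Usage sketch (illustrative):
--   local ip, err = require("apisix.cli.ip"):new("127.0.0.1")
--   if ip then print(ip:is_loopback()) end --> true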
---
-- Is the given IP a loopback address?
--
-- @function cli.ip:is_loopback
-- @treturn boolean True if the given IP is a loopback address, false otherwise.
function _M.is_loopback(self)
return self._ip and "loopback" == self._ip:range()
end
---
-- Is the given IP the unspecified address?
--
-- @function cli.ip:is_unspecified
-- @treturn boolean True if the given IP is the unspecified address (all zeros), false otherwise.
function _M.is_unspecified(self)
return self._ip and "unspecified" == self._ip:range()
end
return _M

View File

@@ -0,0 +1,998 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return [=[
# Configuration File - Nginx Server Configs
# This is a read-only file, do not try to modify it.
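# Template syntax note (lua-resty-template, for reference): {% ... %} runs
# Lua control flow and {* expr *} interpolates a value; e.g. with
# worker_processes = "auto", the directive below renders as "worker_processes auto;".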
{% if user and user ~= '' then %}
user {* user *};
{% end %}
master_process on;
worker_processes {* worker_processes *};
{% if os_name == "Linux" and enable_cpu_affinity == true then %}
worker_cpu_affinity auto;
{% end %}
# main configuration snippet starts
{% if main_configuration_snippet then %}
{* main_configuration_snippet *}
{% end %}
# main configuration snippet ends
error_log {* error_log *} {* error_log_level or "warn" *};
pid logs/nginx.pid;
worker_rlimit_nofile {* worker_rlimit_nofile *};
events {
accept_mutex off;
worker_connections {* event.worker_connections *};
}
worker_rlimit_core {* worker_rlimit_core *};
worker_shutdown_timeout {* worker_shutdown_timeout *};
env APISIX_PROFILE;
env PATH; # for searching external plugin runner's binary
# reserved environment variables for configuration
env APISIX_DEPLOYMENT_ETCD_HOST;
{% if envs then %}
{% for _, name in ipairs(envs) do %}
env {*name*};
{% end %}
{% end %}
{% if use_apisix_base then %}
thread_pool grpc-client-nginx-module threads=1;
lua {
{% if enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* meta.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if standalone_with_admin_api then %}
lua_shared_dict standalone-config {* meta.lua_shared_dict["standalone-config"] *};
{% end %}
{% if status then %}
lua_shared_dict status-report {* meta.lua_shared_dict["status-report"] *};
{% end %}
lua_shared_dict nacos 10m;
}
{% if enabled_stream_plugins["prometheus"] and not enable_http then %}
http {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if enabled_stream_plugins["prometheus"] then %}
init_by_lua_block {
require "resty.core"
local process = require("ngx.process")
local ok, err = process.enable_privileged_agent()
if not ok then
ngx.log(ngx.ERR, "failed to enable privileged_agent: ", err)
end
}
init_worker_by_lua_block {
require("apisix.plugins.prometheus.exporter").http_init(true)
}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics(true)
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
}
{% end %}
{% end %}
{% if enable_stream then %}
stream {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
lua_socket_log_errors off;
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict lrucache-lock-stream {* stream.lua_shared_dict["lrucache-lock-stream"] *};
lua_shared_dict etcd-cluster-health-check-stream {* stream.lua_shared_dict["etcd-cluster-health-check-stream"] *};
lua_shared_dict worker-events-stream {* stream.lua_shared_dict["worker-events-stream"] *};
{% if stream.lua_shared_dict["upstream-healthcheck-stream"] then %}
lua_shared_dict upstream-healthcheck-stream {* stream.lua_shared_dict["upstream-healthcheck-stream"] *};
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars-stream {* stream.lua_shared_dict["tars-stream"] *};
{% end %}
{% if enabled_stream_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *};
{% end %}
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*}-stream {*size*};
{% end %}
{% end %}
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# for stream logs, off by default
{% if stream.enable_access_log == true then %}
log_format main escape={* stream.access_log_format_escape *} '{* stream.access_log_format *}';
access_log {* stream.access_log *} main buffer=16384 flush=3;
{% end %}
# stream configuration snippet starts
{% if stream_configuration_snippet then %}
{* stream_configuration_snippet *}
{% end %}
# stream configuration snippet ends
upstream apisix_backend {
server 127.0.0.1:80;
balancer_by_lua_block {
apisix.stream_balancer_phase()
}
}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.stream_init(args)
}
init_worker_by_lua_block {
apisix.stream_init_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/stream_worker_events.sock;
access_log off;
content_by_lua_block {
require("resty.events.compat").run()
}
}
{% end %}
server {
{% for _, item in ipairs(stream_proxy.tcp or {}) do %}
listen {*item.addr*} {% if item.tls then %} ssl {% end %} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %};
{% end %}
{% for _, addr in ipairs(stream_proxy.udp or {}) do %}
listen {*addr*} udp {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if tcp_enable_ssl then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if proxy_protocol and proxy_protocol.enable_tcp_pp_to_upstream then %}
proxy_protocol on;
{% end %}
preread_by_lua_block {
apisix.stream_preread_phase()
}
proxy_pass apisix_backend;
{% if use_apisix_base then %}
set $upstream_sni "apisix_backend";
proxy_ssl_server_name on;
proxy_ssl_name $upstream_sni;
{% end %}
log_by_lua_block {
apisix.stream_log_phase()
}
}
}
{% end %}
{% if enable_http then %}
http {
# put extra_lua_path in front of the builtin path
# so user can override the source code
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=]
.. [=[$prefix/deps/lib/lua/5.1/?.so;;]=]
.. [=[{*lua_cpath*};";
{% if max_pending_timers then %}
lua_max_pending_timers {* max_pending_timers *};
{% end %}
{% if max_running_timers then %}
lua_max_running_timers {* max_running_timers *};
{% end %}
lua_shared_dict internal-status {* http.lua_shared_dict["internal-status"] *};
lua_shared_dict upstream-healthcheck {* http.lua_shared_dict["upstream-healthcheck"] *};
lua_shared_dict worker-events {* http.lua_shared_dict["worker-events"] *};
lua_shared_dict lrucache-lock {* http.lua_shared_dict["lrucache-lock"] *};
lua_shared_dict balancer-ewma {* http.lua_shared_dict["balancer-ewma"] *};
lua_shared_dict balancer-ewma-locks {* http.lua_shared_dict["balancer-ewma-locks"] *};
lua_shared_dict balancer-ewma-last-touched-at {* http.lua_shared_dict["balancer-ewma-last-touched-at"] *};
lua_shared_dict etcd-cluster-health-check {* http.lua_shared_dict["etcd-cluster-health-check"] *}; # etcd health check
# for discovery shared dict
{% if discovery_shared_dicts then %}
{% for key, size in pairs(discovery_shared_dicts) do %}
lua_shared_dict {*key*} {*size*};
{% end %}
{% end %}
{% if enabled_discoveries["tars"] then %}
lua_shared_dict tars {* http.lua_shared_dict["tars"] *};
{% end %}
{% if http.lua_shared_dict["plugin-ai-rate-limiting"] then %}
lua_shared_dict plugin-ai-rate-limiting {* http.lua_shared_dict["plugin-ai-rate-limiting"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting 10m;
{% end %}
    {% if http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] then %}
lua_shared_dict plugin-ai-rate-limiting-reset-header {* http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] *};
{% else %}
lua_shared_dict plugin-ai-rate-limiting-reset-header 10m;
{% end %}
{% if enabled_plugins["limit-conn"] then %}
lua_shared_dict plugin-limit-conn {* http.lua_shared_dict["plugin-limit-conn"] *};
lua_shared_dict plugin-limit-conn-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-conn-redis-cluster-slot-lock"] *};
{% end %}
{% if enabled_plugins["limit-req"] then %}
lua_shared_dict plugin-limit-req-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-req-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-req {* http.lua_shared_dict["plugin-limit-req"] *};
{% end %}
{% if enabled_plugins["limit-count"] then %}
lua_shared_dict plugin-limit-count {* http.lua_shared_dict["plugin-limit-count"] *};
lua_shared_dict plugin-limit-count-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-count-redis-cluster-slot-lock"] *};
lua_shared_dict plugin-limit-count-reset-header {* http.lua_shared_dict["plugin-limit-count"] *};
{% end %}
{% if enabled_plugins["prometheus"] and not enabled_stream_plugins["prometheus"] then %}
lua_shared_dict prometheus-metrics {* http.lua_shared_dict["prometheus-metrics"] *};
{% end %}
{% if enabled_plugins["skywalking"] then %}
lua_shared_dict tracing_buffer {* http.lua_shared_dict.tracing_buffer *}; # plugin: skywalking
{% end %}
{% if enabled_plugins["api-breaker"] then %}
lua_shared_dict plugin-api-breaker {* http.lua_shared_dict["plugin-api-breaker"] *};
{% end %}
{% if enabled_plugins["openid-connect"] or enabled_plugins["authz-keycloak"] then %}
# for openid-connect and authz-keycloak plugin
lua_shared_dict discovery {* http.lua_shared_dict["discovery"] *}; # cache for discovery metadata documents
{% end %}
{% if enabled_plugins["openid-connect"] then %}
# for openid-connect plugin
lua_shared_dict jwks {* http.lua_shared_dict["jwks"] *}; # cache for JWKs
lua_shared_dict introspection {* http.lua_shared_dict["introspection"] *}; # cache for JWT verification results
{% end %}
{% if enabled_plugins["cas-auth"] then %}
lua_shared_dict cas_sessions {* http.lua_shared_dict["cas-auth"] *};
{% end %}
{% if enabled_plugins["authz-keycloak"] then %}
# for authz-keycloak
lua_shared_dict access-tokens {* http.lua_shared_dict["access-tokens"] *}; # cache for service account access tokens
{% end %}
{% if enabled_plugins["ocsp-stapling"] then %}
lua_shared_dict ocsp-stapling {* http.lua_shared_dict["ocsp-stapling"] *}; # cache for ocsp-stapling
{% end %}
{% if enabled_plugins["ext-plugin-pre-req"] or enabled_plugins["ext-plugin-post-req"] then %}
lua_shared_dict ext-plugin {* http.lua_shared_dict["ext-plugin"] *}; # cache for ext-plugin
{% end %}
{% if enabled_plugins["mcp-bridge"] then %}
lua_shared_dict mcp-session {* http.lua_shared_dict["mcp-session"] *}; # cache for mcp-session
{% end %}
{% if config_center == "xds" then %}
lua_shared_dict xds-config 10m;
lua_shared_dict xds-config-version 1m;
{% end %}
# for custom shared dict
{% if http.custom_lua_shared_dict then %}
{% for cache_key, cache_size in pairs(http.custom_lua_shared_dict) do %}
lua_shared_dict {*cache_key*} {*cache_size*};
{% end %}
{% end %}
{% if enabled_plugins["error-log-logger"] then %}
lua_capture_error_log 10m;
{% end %}
lua_ssl_verify_depth 5;
ssl_session_timeout 86400;
{% if http.underscores_in_headers then %}
underscores_in_headers {* http.underscores_in_headers *};
{%end%}
lua_socket_log_errors off;
resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %};
resolver_timeout {*resolver_timeout*};
lua_http10_buffering off;
lua_regex_match_limit 100000;
lua_regex_cache_max_entries 8192;
{% if http.enable_access_log == false then %}
access_log off;
{% else %}
log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}';
uninitialized_variable_warn off;
{% if http.access_log_buffer then %}
access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3;
{% else %}
access_log {* http.access_log *} main buffer=16384 flush=3;
{% end %}
{% end %}
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
client_header_timeout {* http.client_header_timeout *};
client_body_timeout {* http.client_body_timeout *};
send_timeout {* http.send_timeout *};
variables_hash_max_size {* http.variables_hash_max_size *};
server_tokens off;
include mime.types;
charset {* http.charset *};
{% if http.real_ip_header then %}
real_ip_header {* http.real_ip_header *};
{% end %}
{% if http.real_ip_recursive then %}
real_ip_recursive {* http.real_ip_recursive *};
{% end %}
{% if http.real_ip_from then %}
{% for _, real_ip in ipairs(http.real_ip_from) do %}
set_real_ip_from {*real_ip*};
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# http configuration snippet starts
{% if http_configuration_snippet then %}
{* http_configuration_snippet *}
{% end %}
# http configuration snippet ends
upstream apisix_backend {
server 0.0.0.1;
{% if use_apisix_base then %}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
# we put the static configuration above so that we can override it in the Lua code
balancer_by_lua_block {
apisix.http_balancer_phase()
}
{% else %}
balancer_by_lua_block {
apisix.http_balancer_phase()
}
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
{% end %}
}
{% if enabled_plugins["dubbo-proxy"] then %}
upstream apisix_dubbo_backend {
server 0.0.0.1;
balancer_by_lua_block {
apisix.http_balancer_phase()
}
        # dynamic keepalive doesn't work with dubbo as the connection here
# is managed by ngx_multi_upstream_module
multi {* dubbo_upstream_multiplex_count *};
keepalive {* http.upstream.keepalive *};
keepalive_requests {* http.upstream.keepalive_requests *};
keepalive_timeout {* http.upstream.keepalive_timeout *};
}
{% end %}
{% if use_apisix_base then %}
apisix_delay_client_max_body_check on;
apisix_mirror_on_demand on;
{% end %}
{% if wasm then %}
wasm_vm wasmtime;
{% end %}
init_by_lua_block {
require "resty.core"
{% if lua_module_hook then %}
require "{* lua_module_hook *}"
{% end %}
apisix = require("apisix")
local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} }
local args = {
dns_resolver = dns_resolver,
}
apisix.http_init(args)
-- set apisix_lua_home into constants module
-- it may be used by plugins to determine the work path of apisix
local constants = require("apisix.constants")
constants.apisix_lua_home = "{*apisix_lua_home*}"
}
init_worker_by_lua_block {
apisix.http_init_worker()
}
exit_worker_by_lua_block {
apisix.http_exit_worker()
}
{% if (events.module or "") == "lua-resty-events" then %}
# the server block for lua-resty-events
server {
listen unix:{*apisix_lua_home*}/logs/worker_events.sock;
access_log off;
location / {
content_by_lua_block {
require("resty.events.compat").run()
}
}
}
{% end %}
{% if enable_control then %}
server {
listen {* control_server_addr *};
access_log off;
location / {
content_by_lua_block {
apisix.http_control()
}
}
}
{% end %}
{% if status then %}
server {
listen {* status_server_addr *} enable_process=privileged_agent;
access_log off;
location /status {
content_by_lua_block {
apisix.status()
}
}
location /status/ready {
content_by_lua_block {
apisix.status_ready()
}
}
}
{% end %}
{% if enabled_plugins["prometheus"] and prometheus_server_addr then %}
server {
{% if use_apisix_base then %}
listen {* prometheus_server_addr *} enable_process=privileged_agent;
{% else %}
listen {* prometheus_server_addr *};
{% end %}
access_log off;
location / {
content_by_lua_block {
local prometheus = require("apisix.plugins.prometheus.exporter")
prometheus.export_metrics()
}
}
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
stub_status;
}
}
{% end %}
{% if enable_admin then %}
server {
{%if https_admin then%}
listen {* admin_server_addr *} ssl;
ssl_certificate {* admin_api_mtls.admin_ssl_cert *};
ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *};
{%if admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= "" then%}
ssl_verify_client on;
ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *};
{% end %}
ssl_session_cache shared:SSL:20m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% else %}
listen {* admin_server_addr *};
{%end%}
log_not_found off;
# admin configuration snippet starts
{% if http_admin_configuration_snippet then %}
{* http_admin_configuration_snippet *}
{% end %}
# admin configuration snippet ends
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
{%if allow_admin then%}
{% for _, allow_ip in ipairs(allow_admin) do %}
allow {*allow_ip*};
{% end %}
deny all;
{%else%}
allow all;
{%end%}
location /apisix/admin {
content_by_lua_block {
apisix.http_admin()
}
}
{% if enable_admin_ui then %}
location = /ui {
return 301 /ui/;
}
location ^~ /ui/ {
rewrite ^/ui/(.*)$ /$1 break;
root {* apisix_lua_home *}/ui;
try_files $uri /index.html =404;
gzip on;
gzip_types text/css application/javascript application/json;
expires 7200s;
add_header Cache-Control "private,max-age=7200";
}
{% end %}
}
{% end %}
{% if deployment_role ~= "control_plane" then %}
{% if enabled_plugins["proxy-cache"] then %}
# for proxy cache
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off;
{% else %}
lua_shared_dict {* cache.name *} {* cache.memory_size *};
{% end %}
{% end %}
map $upstream_cache_zone $upstream_cache_zone_info {
{% for _, cache in ipairs(proxy_cache.zones) do %}
{% if cache.disk_path and cache.cache_levels and cache.disk_size then %}
{* cache.name *} {* cache.disk_path *},{* cache.cache_levels *};
{% end %}
{% end %}
}
{% end %}
server {
{% if enable_http2 then %}
http2 on;
{% end %}
{% if enable_http3_in_server_context then %}
http3 on;
{% end %}
{% for _, item in ipairs(node_listen) do %}
listen {* item.ip *}:{* item.port *} default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% if ssl.enable then %}
{% for _, item in ipairs(ssl.listen) do %}
{% if item.enable_http3 then %}
listen {* item.ip *}:{* item.port *} quic default_server {% if enable_reuseport then %} reuseport {% end %};
listen {* item.ip *}:{* item.port *} ssl default_server;
{% else %}
listen {* item.ip *}:{* item.port *} ssl default_server {% if enable_reuseport then %} reuseport {% end %};
{% end %}
{% end %}
{% end %}
{% if proxy_protocol and proxy_protocol.listen_http_port then %}
listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol;
{% end %}
{% if proxy_protocol and proxy_protocol.listen_https_port then %}
listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol;
{% end %}
server_name _;
{% if ssl.enable then %}
ssl_certificate {* ssl.ssl_cert *};
ssl_certificate_key {* ssl.ssl_cert_key *};
ssl_session_cache shared:SSL:20m;
ssl_session_timeout 10m;
ssl_protocols {* ssl.ssl_protocols *};
ssl_ciphers {* ssl.ssl_ciphers *};
ssl_prefer_server_ciphers on;
{% if ssl.ssl_session_tickets then %}
ssl_session_tickets on;
{% else %}
ssl_session_tickets off;
{% end %}
{% end %}
{% if ssl.ssl_trusted_certificate ~= nil then %}
proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *};
{% end %}
# opentelemetry_set_ngx_var starts
{% if opentelemetry_set_ngx_var then %}
set $opentelemetry_context_traceparent '';
set $opentelemetry_trace_id '';
set $opentelemetry_span_id '';
{% end %}
# opentelemetry_set_ngx_var ends
# zipkin_set_ngx_var starts
{% if zipkin_set_ngx_var then %}
set $zipkin_context_traceparent '';
set $zipkin_trace_id '';
set $zipkin_span_id '';
{% end %}
# zipkin_set_ngx_var ends
# http server configuration snippet starts
{% if http_server_configuration_snippet then %}
{* http_server_configuration_snippet *}
{% end %}
# http server configuration snippet ends
location = /apisix/nginx_status {
allow 127.0.0.0/24;
deny all;
access_log off;
stub_status;
}
{% if ssl.enable then %}
ssl_client_hello_by_lua_block {
apisix.ssl_client_hello_phase()
}
ssl_certificate_by_lua_block {
apisix.ssl_phase()
}
{% end %}
{% if http.proxy_ssl_server_name then %}
proxy_ssl_name $upstream_host;
proxy_ssl_server_name on;
{% end %}
location / {
set $upstream_mirror_host '';
set $upstream_mirror_uri '';
set $upstream_upgrade '';
set $upstream_connection '';
set $upstream_scheme 'http';
set $upstream_host $http_host;
set $upstream_uri '';
set $ctx_ref '';
{% if wasm then %}
set $wasm_process_req_body '';
set $wasm_process_resp_body '';
{% end %}
# http server location configuration snippet starts
{% if http_server_location_configuration_snippet then %}
{* http_server_location_configuration_snippet *}
{% end %}
# http server location configuration snippet ends
{% if enabled_plugins["dubbo-proxy"] then %}
set $dubbo_service_name '';
set $dubbo_service_version '';
set $dubbo_method '';
{% end %}
access_by_lua_block {
apisix.http_access_phase()
}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_set_header Upgrade $upstream_upgrade;
proxy_set_header Connection $upstream_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass_header Date;
            ### the following x-forwarded-* headers are sent to the upstream server
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
{% if enabled_plugins["proxy-cache"] then %}
            ### the following configuration caches response content from the upstream server
set $upstream_cache_zone off;
set $upstream_cache_key '';
set $upstream_cache_bypass '';
set $upstream_no_cache '';
proxy_cache $upstream_cache_zone;
proxy_cache_valid any {% if proxy_cache.cache_ttl then %} {* proxy_cache.cache_ttl *} {% else %} 10s {% end %};
proxy_cache_min_uses 1;
proxy_cache_methods GET HEAD POST;
proxy_cache_lock_timeout 5s;
proxy_cache_use_stale off;
proxy_cache_key $upstream_cache_key;
proxy_no_cache $upstream_no_cache;
proxy_cache_bypass $upstream_cache_bypass;
{% end %}
proxy_pass $upstream_scheme://apisix_backend$upstream_uri;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
location @grpc_pass {
access_by_lua_block {
apisix.grpc_access_phase()
}
{% if use_apisix_base then %}
# For servers which obey the standard, when `:authority` is missing,
# `host` will be used instead. When used with apisix-runtime, we can do
# better by setting `:authority` directly
grpc_set_header ":authority" $upstream_host;
{% else %}
grpc_set_header "Host" $upstream_host;
{% end %}
grpc_set_header Content-Type application/grpc;
grpc_set_header TE trailers;
grpc_socket_keepalive on;
grpc_pass $upstream_scheme://apisix_backend;
{% if enabled_plugins["proxy-mirror"] then %}
mirror /proxy_mirror_grpc;
{% end %}
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% if enabled_plugins["dubbo-proxy"] then %}
location @dubbo_pass {
access_by_lua_block {
apisix.dubbo_access_phase()
}
dubbo_pass_all_headers on;
dubbo_pass_body on;
dubbo_pass $dubbo_service_name $dubbo_service_version $dubbo_method apisix_dubbo_backend;
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
body_filter_by_lua_block {
apisix.http_body_filter_phase()
}
log_by_lua_block {
apisix.http_log_phase()
}
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
proxy_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
proxy_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
proxy_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
proxy_http_version 1.1;
proxy_set_header Host $upstream_host;
proxy_pass $upstream_mirror_uri;
}
{% end %}
{% if enabled_plugins["proxy-mirror"] then %}
location = /proxy_mirror_grpc {
internal;
{% if not use_apisix_base then %}
if ($upstream_mirror_uri = "") {
return 200;
}
{% end %}
{% if proxy_mirror_timeouts then %}
{% if proxy_mirror_timeouts.connect then %}
grpc_connect_timeout {* proxy_mirror_timeouts.connect *};
{% end %}
{% if proxy_mirror_timeouts.read then %}
grpc_read_timeout {* proxy_mirror_timeouts.read *};
{% end %}
{% if proxy_mirror_timeouts.send then %}
grpc_send_timeout {* proxy_mirror_timeouts.send *};
{% end %}
{% end %}
grpc_pass $upstream_mirror_host;
}
{% end %}
}
{% end %}
# http end configuration snippet starts
{% if http_end_configuration_snippet then %}
{* http_end_configuration_snippet *}
{% end %}
# http end configuration snippet ends
}
{% end %}
]=]

File diff suppressed because it is too large

View File

@@ -0,0 +1,450 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local jsonschema = require("jsonschema")
local pairs = pairs
local pcall = pcall
local require = require
local _M = {}
local etcd_schema = {
type = "object",
properties = {
resync_delay = {
type = "integer",
},
user = {
type = "string",
},
password = {
type = "string",
},
tls = {
type = "object",
properties = {
cert = {
type = "string",
},
key = {
type = "string",
},
},
},
prefix = {
type = "string",
},
host = {
type = "array",
items = {
type = "string",
pattern = [[^https?://]]
},
minItems = 1,
},
timeout = {
type = "integer",
default = 30,
minimum = 1,
description = "etcd connection timeout in seconds",
},
},
required = {"prefix", "host"}
}
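-- A minimal `etcd` section that satisfies this schema might look like the
-- following (an illustrative sketch; `prefix` and `host` are required, and
-- every host entry must match the `^https?://` pattern):
--
--   etcd:
--     prefix: "/apisix"
--     host:
--       - "http://127.0.0.1:2379"
--     timeout: 30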
local config_schema = {
type = "object",
properties = {
apisix = {
properties = {
lua_module_hook = {
pattern = "^[a-zA-Z._-]+$",
},
proxy_protocol = {
type = "object",
properties = {
listen_http_port = {
type = "integer",
},
listen_https_port = {
type = "integer",
},
enable_tcp_pp = {
type = "boolean",
},
enable_tcp_pp_to_upstream = {
type = "boolean",
},
}
},
proxy_cache = {
type = "object",
properties = {
zones = {
type = "array",
minItems = 1,
items = {
type = "object",
properties = {
name = {
type = "string",
},
memory_size = {
type = "string",
},
disk_size = {
type = "string",
},
disk_path = {
type = "string",
},
cache_levels = {
type = "string",
},
},
oneOf = {
{
required = {"name", "memory_size"},
maxProperties = 2,
},
{
required = {"name", "memory_size", "disk_size",
"disk_path", "cache_levels"},
}
},
},
uniqueItems = true,
}
}
},
proxy_mode = {
type = "string",
enum = {"http", "stream", "http&stream"},
},
stream_proxy = {
type = "object",
properties = {
tcp = {
type = "array",
minItems = 1,
items = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
{
type = "object",
properties = {
addr = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
}
},
tls = {
type = "boolean",
}
},
required = {"addr"}
},
},
},
uniqueItems = true,
},
udp = {
type = "array",
minItems = 1,
items = {
anyOf = {
{
type = "integer",
},
{
type = "string",
},
},
},
uniqueItems = true,
},
}
},
dns_resolver = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
dns_resolver_valid = {
type = "integer",
},
enable_http2 = {
type = "boolean",
default = true
},
ssl = {
type = "object",
properties = {
ssl_trusted_certificate = {
type = "string",
default = "system"
},
listen = {
type = "array",
items = {
type = "object",
properties = {
ip = {
type = "string",
},
port = {
type = "integer",
minimum = 1,
maximum = 65535
},
enable_http3 = {
type = "boolean",
},
}
}
},
}
},
data_encryption = {
type = "object",
properties = {
keyring = {
anyOf = {
{
type = "array",
minItems = 1,
items = {
type = "string",
minLength = 16,
maxLength = 16
}
},
{
type = "string",
minLength = 16,
maxLength = 16
}
}
},
}
},
}
},
nginx_config = {
type = "object",
properties = {
envs = {
type = "array",
minItems = 1,
items = {
type = "string",
}
}
},
},
http = {
type = "object",
properties = {
custom_lua_shared_dict = {
type = "object",
}
}
},
etcd = etcd_schema,
plugins = {
type = "array",
default = {},
minItems = 0,
items = {
type = "string"
}
},
stream_plugins = {
type = "array",
default = {},
minItems = 0,
items = {
type = "string"
}
},
wasm = {
type = "object",
properties = {
plugins = {
type = "array",
minItems = 1,
items = {
type = "object",
properties = {
name = {
type = "string"
},
file = {
type = "string"
},
priority = {
type = "integer"
},
http_request_phase = {
enum = {"access", "rewrite"},
default = "access",
},
},
required = {"name", "file", "priority"}
}
}
}
},
deployment = {
type = "object",
properties = {
role = {
enum = {"traditional", "control_plane", "data_plane", "standalone"},
default = "traditional"
}
},
},
},
required = {"apisix", "deployment"},
}
local admin_schema = {
type = "object",
properties = {
admin_key = {
type = "array",
properties = {
items = {
properties = {
name = {type = "string"},
key = {type = "string"},
role = {type = "string"},
}
}
}
},
admin_listen = {
properties = {
listen = { type = "string" },
port = { type = "integer" },
},
default = {
listen = "0.0.0.0",
port = 9180,
}
},
https_admin = {
type = "boolean",
},
admin_key_required = {
type = "boolean",
},
}
}
local deployment_schema = {
traditional = {
properties = {
etcd = etcd_schema,
admin = admin_schema,
role_traditional = {
properties = {
config_provider = {
enum = {"etcd", "yaml"}
},
},
required = {"config_provider"}
}
},
required = {"etcd"}
},
control_plane = {
properties = {
etcd = etcd_schema,
admin = admin_schema,
role_control_plane = {
properties = {
config_provider = {
enum = {"etcd"}
},
},
required = {"config_provider"}
},
},
required = {"etcd", "role_control_plane"}
},
data_plane = {
properties = {
etcd = etcd_schema,
role_data_plane = {
properties = {
config_provider = {
enum = {"etcd", "yaml", "json", "xds"}
},
},
required = {"config_provider"}
},
},
required = {"role_data_plane"}
}
}
function _M.validate(yaml_conf)
local validator = jsonschema.generate_validator(config_schema)
local ok, err = validator(yaml_conf)
if not ok then
return false, "failed to validate config: " .. err
end
if yaml_conf.discovery then
for kind, conf in pairs(yaml_conf.discovery) do
local ok, schema = pcall(require, "apisix.discovery." .. kind .. ".schema")
if ok then
local validator = jsonschema.generate_validator(schema)
local ok, err = validator(conf)
if not ok then
return false, "invalid discovery " .. kind .. " configuration: " .. err
end
end
end
end
local role = yaml_conf.deployment.role
local validator = jsonschema.generate_validator(deployment_schema[role])
local ok, err = validator(yaml_conf.deployment)
if not ok then
return false, "invalid deployment " .. role .. " configuration: " .. err
end
return true
end
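-- Illustrative usage (a sketch, assuming the YAML has already been parsed
-- into a Lua table):
--
--   local ok, err = _M.validate({
--       apisix = {},
--       deployment = {
--           role = "traditional",
--           etcd = { prefix = "/apisix", host = { "http://127.0.0.1:2379" } },
--       },
--   })
--   if not ok then
--       print(err)  -- e.g. "failed to validate config: ..."
--   end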
return _M

View File

@@ -0,0 +1,189 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local pcall = pcall
local open = io.open
local popen = io.popen
local close = io.close
local exit = os.exit
local stderr = io.stderr
local str_format = string.format
local tonumber = tonumber
local io = io
local ipairs = ipairs
local assert = assert
local _M = {}
-- Note: the `execute_cmd` return value has a trailing line break;
-- it is recommended to run the result through the `trim` function.
local function execute_cmd(cmd)
local t, err = popen(cmd)
if not t then
return nil, "failed to execute command: "
.. cmd .. ", error info: " .. err
end
local data, err = t:read("*all")
t:close()
if not data then
return nil, "failed to read execution result of: "
.. cmd .. ", error info: " .. err
end
return data
end
_M.execute_cmd = execute_cmd
-- For commands whose stdout is always empty,
-- redirect stderr to stdout to capture the error message
function _M.execute_cmd_with_error(cmd)
return execute_cmd(cmd .. " 2>&1")
end
function _M.trim(s)
return (s:gsub("^%s*(.-)%s*$", "%1"))
end
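-- Example (a sketch): strip the trailing newline that `execute_cmd` leaves
-- on command output.
--
--   local out = execute_cmd("echo hello")  -- "hello\n"
--   out = _M.trim(out)                     -- "hello"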
function _M.split(self, sep)
local sep, fields = sep or ":", {}
local pattern = str_format("([^%s]+)", sep)
self:gsub(pattern, function(c) fields[#fields + 1] = c end)
return fields
end
function _M.read_file(file_path)
local file, err = open(file_path, "rb")
if not file then
return false, "failed to open file: " .. file_path .. ", error info:" .. err
end
local data, err = file:read("*all")
file:close()
if not data then
return false, "failed to read file: " .. file_path .. ", error info:" .. err
end
return data
end
function _M.die(...)
stderr:write(...)
exit(1)
end
function _M.is_32bit_arch()
local ok, ffi = pcall(require, "ffi")
if ok then
-- LuaJIT
return ffi.abi("32bit")
end
local ret = _M.execute_cmd("getconf LONG_BIT")
local bits = tonumber(ret)
return bits <= 32
end
function _M.write_file(file_path, data)
local file, err = open(file_path, "w+")
if not file then
return false, "failed to open file: "
.. file_path
.. ", error info:"
.. err
end
local ok, err = file:write(data)
file:close()
if not ok then
return false, "failed to write file: "
.. file_path
.. ", error info:"
.. err
end
return true
end
function _M.file_exists(file_path)
local f = open(file_path, "r")
return f ~= nil and close(f)
end
do
local trusted_certs_paths = {
"/etc/ssl/certs/ca-certificates.crt", -- Debian/Ubuntu/Gentoo
"/etc/pki/tls/certs/ca-bundle.crt", -- Fedora/RHEL 6
"/etc/ssl/ca-bundle.pem", -- OpenSUSE
"/etc/pki/tls/cacert.pem", -- OpenELEC
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", -- CentOS/RHEL 7
"/etc/ssl/cert.pem", -- OpenBSD, Alpine
}
-- Check if a file exists using Lua's built-in `io.open`
local function file_exists(path)
local file = io.open(path, "r")
if file then
file:close()
return true
else
return false
end
end
function _M.get_system_trusted_certs_filepath()
for _, path in ipairs(trusted_certs_paths) do
if file_exists(path) then
return path
end
end
return nil,
"Could not find trusted certs file in " ..
"any of the `system`-predefined locations. " ..
"Please install a certs file there or set " ..
"`lua_ssl_trusted_certificate` to a " ..
"specific file path instead of `system`"
end
end
function _M.gen_trusted_certs_combined_file(combined_filepath, paths)
local combined_file = assert(io.open(combined_filepath, "w"))
for _, path in ipairs(paths) do
local cert_file = assert(io.open(path, "r"))
combined_file:write(cert_file:read("*a"))
combined_file:write("\n")
cert_file:close()
end
combined_file:close()
end
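-- Illustrative usage (a sketch; the output path is hypothetical):
--
--   local path = _M.get_system_trusted_certs_filepath()
--   if path then
--       _M.gen_trusted_certs_combined_file("/tmp/combined-ca.pem", {path})
--   end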
return _M

View File

@@ -0,0 +1,46 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
RPC_ERROR = 0,
RPC_PREPARE_CONF = 1,
RPC_HTTP_REQ_CALL = 2,
RPC_EXTRA_INFO = 3,
RPC_HTTP_RESP_CALL = 4,
HTTP_ETCD_DIRECTORY = {
["/upstreams"] = true,
["/plugins"] = true,
["/ssls"] = true,
["/stream_routes"] = true,
["/plugin_metadata"] = true,
["/routes"] = true,
["/services"] = true,
["/consumers"] = true,
["/global_rules"] = true,
["/protos"] = true,
["/plugin_configs"] = true,
["/consumer_groups"] = true,
["/secrets"] = true,
},
STREAM_ETCD_DIRECTORY = {
["/upstreams"] = true,
["/services"] = true,
["/plugins"] = true,
["/ssls"] = true,
["/stream_routes"] = true,
["/plugin_metadata"] = true,
},
}

View File

@@ -0,0 +1,334 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_local = require("apisix.core.config_local")
local secret = require("apisix.secret")
local plugin = require("apisix.plugin")
local plugin_checker = require("apisix.plugin").plugin_checker
local check_schema = require("apisix.core.schema").check
local error = error
local ipairs = ipairs
local pairs = pairs
local type = type
local string_sub = string.sub
local consumers
local _M = {
version = 0.3,
}
local lrucache = core.lrucache.new({
ttl = 300, count = 512
})
-- Set the value of "consumers_count_for_lrucache" based on the number of
-- consumers in the current environment, applying an appropriate
-- adjustment coefficient.
local consumers_count_for_lrucache = 4096
local function remove_etcd_prefix(key)
local prefix = ""
local local_conf = config_local.local_conf()
local role = core.table.try_read_attr(local_conf, "deployment", "role")
local provider = core.table.try_read_attr(local_conf, "deployment", "role_" ..
role, "config_provider")
if provider == "etcd" and local_conf.etcd and local_conf.etcd.prefix then
prefix = local_conf.etcd.prefix
end
return string_sub(key, #prefix + 1)
end
-- /{etcd.prefix}/consumers/{consumer_name}/credentials/{credential_id} --> {consumer_name}
local function get_consumer_name_from_credential_etcd_key(key)
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[3]
end
local function is_credential_etcd_key(key)
if not key then
return false
end
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[2] == "consumers" and uri_segs[4] == "credentials"
end
local function get_credential_id_from_etcd_key(key)
local uri_segs = core.utils.split_uri(remove_etcd_prefix(key))
return uri_segs[5]
end
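-- For example (a sketch, assuming the default etcd prefix "/apisix"), given
-- the key "/apisix/consumers/jack/credentials/cred1":
--   is_credential_etcd_key(key)                      --> true
--   get_consumer_name_from_credential_etcd_key(key)  --> "jack"
--   get_credential_id_from_etcd_key(key)             --> "cred1"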
local function filter_consumers_list(data_list)
if #data_list == 0 then
return data_list
end
local list = {}
for _, item in ipairs(data_list) do
if not (type(item) == "table" and is_credential_etcd_key(item.key)) then
core.table.insert(list, item)
end
end
return list
end
local plugin_consumer
do
local consumers_id_lrucache = core.lrucache.new({
count = consumers_count_for_lrucache
})
local function construct_consumer_data(val, plugin_config)
-- if the val is a Consumer, clone it to the local consumer;
-- if the val is a Credential, get the Consumer by consumer_name and then
-- clone it to the local consumer.
local consumer
if is_credential_etcd_key(val.key) then
local consumer_name = get_consumer_name_from_credential_etcd_key(val.key)
local the_consumer = consumers:get(consumer_name)
if the_consumer and the_consumer.value then
consumer = core.table.clone(the_consumer.value)
consumer.modifiedIndex = the_consumer.modifiedIndex
consumer.credential_id = get_credential_id_from_etcd_key(val.key)
else
-- Normally we wouldn't get here:
-- every credential should belong to a consumer.
core.log.error("failed to get the consumer for the credential,",
" a wild credential has appeared!",
" credential key: ", val.key, ", consumer name: ", consumer_name)
return nil, "failed to get the consumer for the credential"
end
else
consumer = core.table.clone(val.value)
consumer.modifiedIndex = val.modifiedIndex
end
-- if the consumer has a "custom_id" label, copy it to the custom_id field;
-- the custom_id is later set in the request headers sent to the upstream.
if consumer.labels then
consumer.custom_id = consumer.labels["custom_id"]
end
-- Note: the id here is the key of the consumer data, which is
-- the 'username' field in the Admin API
consumer.consumer_name = consumer.id
consumer.auth_conf = plugin_config
return consumer
end
function plugin_consumer()
local plugins = {}
if consumers.values == nil then
return plugins
end
-- consumers.values is the list fetched from etcd by the prefix key
-- {etcd_prefix}/consumers, so it contains both consumers and credentials.
-- Each val in the for-loop may be a Consumer or a Credential.
for _, val in ipairs(consumers.values) do
if type(val) ~= "table" then
goto CONTINUE
end
for name, config in pairs(val.value.plugins or {}) do
local plugin_obj = plugin.get(name)
if plugin_obj and plugin_obj.type == "auth" then
if not plugins[name] then
plugins[name] = {
nodes = {},
len = 0,
conf_version = consumers.conf_version
}
end
local consumer = consumers_id_lrucache(val.value.id .. name,
val.modifiedIndex, construct_consumer_data, val, config)
if consumer == nil then
goto CONTINUE
end
plugins[name].len = plugins[name].len + 1
core.table.insert(plugins[name].nodes, plugins[name].len,
consumer)
core.log.info("consumer:", core.json.delay_encode(consumer))
end
end
::CONTINUE::
end
return plugins
end
end
_M.filter_consumers_list = filter_consumers_list
function _M.get_consumer_key_from_credential_key(key)
local uri_segs = core.utils.split_uri(key)
return "/consumers/" .. uri_segs[3]
end
function _M.plugin(plugin_name)
local plugin_conf = core.lrucache.global("/consumers",
consumers.conf_version, plugin_consumer)
return plugin_conf[plugin_name]
end
function _M.consumers_conf(plugin_name)
return _M.plugin(plugin_name)
end
-- attach chosen consumer to the ctx, used in auth plugin
function _M.attach_consumer(ctx, consumer, conf)
ctx.consumer = consumer
ctx.consumer_name = consumer.consumer_name
ctx.consumer_group_id = consumer.group_id
ctx.consumer_ver = conf.conf_version
core.request.set_header(ctx, "X-Consumer-Username", consumer.username)
core.request.set_header(ctx, "X-Credential-Identifier", consumer.credential_id)
core.request.set_header(ctx, "X-Consumer-Custom-ID", consumer.custom_id)
end
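-- For instance (a sketch), after an auth plugin matches consumer "jack", the
-- request forwarded upstream would carry headers such as:
--   X-Consumer-Username: jack
--   X-Credential-Identifier: <credential id, when authenticated via a credential>
--   X-Consumer-Custom-ID: <value of the "custom_id" label, if set>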
function _M.consumers()
if not consumers then
return nil, nil
end
return filter_consumers_list(consumers.values), consumers.conf_version
end
local create_consume_cache
do
local consumer_lrucache = core.lrucache.new({
count = consumers_count_for_lrucache
})
local function fill_consumer_secret(consumer)
local new_consumer = core.table.clone(consumer)
new_consumer.auth_conf = secret.fetch_secrets(new_consumer.auth_conf, false)
return new_consumer
end
function create_consume_cache(consumers_conf, key_attr)
local consumer_names = {}
for _, consumer in ipairs(consumers_conf.nodes) do
core.log.info("consumer node: ", core.json.delay_encode(consumer))
local new_consumer = consumer_lrucache(consumer, nil,
fill_consumer_secret, consumer)
consumer_names[new_consumer.auth_conf[key_attr]] = new_consumer
end
return consumer_names
end
end
function _M.consumers_kv(plugin_name, consumer_conf, key_attr)
local consumers = lrucache("consumers_key#" .. plugin_name, consumer_conf.conf_version,
create_consume_cache, consumer_conf, key_attr)
return consumers
end
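-- For example (a sketch): with the key-auth plugin, key_attr is "key", so the
-- returned table maps each consumer's configured API key to its consumer data:
--
--   local consumers = _M.consumers_kv("key-auth", consumer_conf, "key")
--   local consumer = consumers["some-api-key"]  -- nil when the key is unknown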
function _M.find_consumer(plugin_name, key, key_value)
local consumer
local consumer_conf
consumer_conf = _M.plugin(plugin_name)
if not consumer_conf then
return nil, nil, "Missing related consumer"
end
local consumers = _M.consumers_kv(plugin_name, consumer_conf, key)
consumer = consumers[key_value]
return consumer, consumer_conf
end
local function check_consumer(consumer, key)
local data_valid
local err
if is_credential_etcd_key(key) then
data_valid, err = check_schema(core.schema.credential, consumer)
else
data_valid, err = check_schema(core.schema.consumer, consumer)
end
if not data_valid then
return data_valid, err
end
return plugin_checker(consumer, core.schema.TYPE_CONSUMER)
end
function _M.init_worker()
local err
local cfg = {
automatic = true,
checker = check_consumer,
}
consumers, err = core.config.new("/consumers", cfg)
if not consumers then
error("failed to create etcd instance for fetching consumers: " .. err)
return
end
end
local function get_anonymous_consumer_from_local_cache(name)
local anon_consumer_raw = consumers:get(name)
if not anon_consumer_raw or not anon_consumer_raw.value or
not anon_consumer_raw.value.id or not anon_consumer_raw.modifiedIndex then
return nil, nil, "failed to get anonymous consumer " .. name
end
-- make structure of anon_consumer similar to that of consumer_mod.consumers_kv's response
local anon_consumer = anon_consumer_raw.value
anon_consumer.consumer_name = anon_consumer_raw.value.id
anon_consumer.modifiedIndex = anon_consumer_raw.modifiedIndex
local anon_consumer_conf = {
conf_version = anon_consumer_raw.modifiedIndex
}
return anon_consumer, anon_consumer_conf
end
function _M.get_anonymous_consumer(name)
local anon_consumer, anon_consumer_conf, err
anon_consumer, anon_consumer_conf, err = get_anonymous_consumer_from_local_cache(name)
return anon_consumer, anon_consumer_conf, err
end
return _M

View File

@@ -0,0 +1,55 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugin_checker = require("apisix.plugin").plugin_checker
local error = error
local consumer_groups
local _M = {
}
function _M.init_worker()
local err
consumer_groups, err = core.config.new("/consumer_groups", {
automatic = true,
item_schema = core.schema.consumer_group,
checker = plugin_checker,
})
if not consumer_groups then
error("failed to sync /consumer_groups: " .. err)
end
end
function _M.consumer_groups()
if not consumer_groups then
return nil, nil
end
return consumer_groups.values, consumer_groups.conf_version
end
function _M.get(id)
return consumer_groups:get(id)
end
return _M

View File

@@ -0,0 +1,212 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local radixtree = require("resty.radixtree")
local builtin_v1_routes = require("apisix.control.v1")
local plugin_mod = require("apisix.plugin")
local core = require("apisix.core")
local str_sub = string.sub
local ipairs = ipairs
local pairs = pairs
local type = type
local ngx = ngx
local get_method = ngx.req.get_method
local events = require("apisix.events")
local _M = {}
local function format_dismod_uri(mod_name, uri)
if core.string.has_prefix(uri, "/v1/") then
return uri
end
local tmp = {"/v1/discovery/", mod_name}
if not core.string.has_prefix(uri, "/") then
core.table.insert(tmp, "/")
end
core.table.insert(tmp, uri)
return core.table.concat(tmp, "")
end
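-- For example:
--   format_dismod_uri("eureka", "/dump")  --> "/v1/discovery/eureka/dump"
--   format_dismod_uri("eureka", "apps")   --> "/v1/discovery/eureka/apps"
--   format_dismod_uri("eureka", "/v1/x")  --> "/v1/x" (already prefixed)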
-- we do not hardcode the discovery module's control api uri
local function format_dismod_control_api_uris(mod_name, api_route)
if not api_route or #api_route == 0 then
return api_route
end
local clone_route = core.table.clone(api_route)
for _, v in ipairs(clone_route) do
local uris = v.uris
local target_uris = core.table.new(#uris, 0)
for _, uri in ipairs(uris) do
local target_uri = format_dismod_uri(mod_name, uri)
core.table.insert(target_uris, target_uri)
end
v.uris = target_uris
end
return clone_route
end
local fetch_control_api_router
do
local function register_api_routes(routes, api_routes)
for _, route in ipairs(api_routes) do
core.table.insert(routes, {
methods = route.methods,
-- note that it is 'uris' for control API, which is an array of strings
paths = route.uris,
handler = function (api_ctx)
local code, body = route.handler(api_ctx)
if code or body then
if type(body) == "table" and ngx.header["Content-Type"] == nil then
core.response.set_header("Content-Type", "application/json")
end
core.response.exit(code, body)
end
end
})
end
end
local routes = {}
local v1_routes = {}
local function empty_func() end
function fetch_control_api_router()
core.table.clear(routes)
for _, plugin in ipairs(plugin_mod.plugins) do
local api_fun = plugin.control_api
if api_fun then
local api_route = api_fun()
register_api_routes(routes, api_route)
end
end
local discovery_type = require("apisix.core.config_local").local_conf().discovery
if discovery_type then
local discovery = require("apisix.discovery.init").discovery
local dump_apis = {}
for key, _ in pairs(discovery_type) do
local dis_mod = discovery[key]
-- if the discovery module has a control_api method, register its routes
local api_fun = dis_mod.control_api
if api_fun then
local api_route = api_fun()
local format_route = format_dismod_control_api_uris(key, api_route)
register_api_routes(routes, format_route)
end
local dump_data = dis_mod.dump_data
if dump_data then
local target_uri = format_dismod_uri(key, "/dump")
local item = {
methods = {"GET"},
uris = {target_uri},
handler = function()
return 200, dump_data()
end
}
core.table.insert(dump_apis, item)
end
end
if #dump_apis > 0 then
core.log.notice("dump_apis: ", core.json.encode(dump_apis, true))
register_api_routes(routes, dump_apis)
end
end
core.table.clear(v1_routes)
register_api_routes(v1_routes, builtin_v1_routes)
local v1_router, err = router.new(v1_routes)
if not v1_router then
return nil, err
end
core.table.insert(routes, {
paths = {"/v1/*"},
filter_fun = function(vars, opts, ...)
local uri = str_sub(vars.uri, #"/v1" + 1)
return v1_router:dispatch(uri, opts, ...)
end,
handler = empty_func,
})
local with_parameter = false
local conf = core.config.local_conf()
if conf.apisix.enable_control and conf.apisix.control then
if conf.apisix.control.router == "radixtree_uri_with_parameter" then
with_parameter = true
end
end
if with_parameter then
return radixtree.new(routes)
else
return router.new(routes)
end
end
end -- do
do
local match_opts = {}
local cached_version
local router
function _M.match(uri)
if cached_version ~= plugin_mod.load_times then
local err
router, err = fetch_control_api_router()
if router == nil then
core.log.error("failed to fetch valid api router: ", err)
return false
end
cached_version = plugin_mod.load_times
end
core.table.clear(match_opts)
match_opts.method = get_method()
return router:dispatch(uri, match_opts)
end
end -- do
local function reload_plugins()
core.log.info("start to hot reload plugins")
plugin_mod.load()
end
function _M.init_worker()
-- register reload plugin handler
events:register(reload_plugins, builtin_v1_routes.reload_event, "PUT")
end
return _M

View File

@@ -0,0 +1,506 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
local upstream_mod = require("apisix.upstream")
local get_upstreams = upstream_mod.upstreams
local collectgarbage = collectgarbage
local ipairs = ipairs
local pcall = pcall
local str_format = string.format
local ngx = ngx
local ngx_var = ngx.var
local events = require("apisix.events")
local _M = {}
_M.RELOAD_EVENT = 'control-api-plugin-reload'
function _M.schema()
local http_plugins, stream_plugins = plugin.get_all({
version = true,
priority = true,
schema = true,
metadata_schema = true,
consumer_schema = true,
type = true,
scope = true,
})
local schema = {
main = {
consumer = core.schema.consumer,
consumer_group = core.schema.consumer_group,
global_rule = core.schema.global_rule,
plugin_config = core.schema.plugin_config,
plugins = core.schema.plugins,
proto = core.schema.proto,
route = core.schema.route,
service = core.schema.service,
ssl = core.schema.ssl,
stream_route = core.schema.stream_route,
upstream = core.schema.upstream,
upstream_hash_header_schema = core.schema.upstream_hash_header_schema,
upstream_hash_vars_schema = core.schema.upstream_hash_vars_schema,
},
plugins = http_plugins,
stream_plugins = stream_plugins,
}
return 200, schema
end
local healthcheck
local function extra_checker_info(value)
if not healthcheck then
healthcheck = require("resty.healthcheck")
end
local name = upstream_mod.get_healthchecker_name(value)
local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck")
if err then
core.log.error("healthcheck.get_target_list failed: ", err)
end
return {
name = value.key,
nodes = nodes,
}
end
local function get_checker_type(checks)
if checks.active and checks.active.type then
return checks.active.type
elseif checks.passive and checks.passive.type then
return checks.passive.type
end
end
local function iter_and_add_healthcheck_info(infos, values)
if not values then
return
end
for _, value in core.config_util.iterate_values(values) do
local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks)
if checks then
local info = extra_checker_info(value)
info.type = get_checker_type(checks)
core.table.insert(infos, info)
end
end
end
local HTML_TEMPLATE = [[
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>APISIX upstream check status</title>
</head>
<body>
<h1>APISIX upstream check status</h1>
<table style="background-color:white" cellspacing="0" cellpadding="3" border="1">
<tr bgcolor="#C0C0C0">
<th>Index</th>
<th>Upstream</th>
<th>Check type</th>
<th>Host</th>
<th>Status</th>
<th>Success counts</th>
<th>TCP Failures</th>
<th>HTTP Failures</th>
<th>TIMEOUT Failures</th>
</tr>
{% local i = 0 %}
{% for _, stat in ipairs(stats) do %}
{% for _, node in ipairs(stat.nodes) do %}
{% i = i + 1 %}
{% if node.status == "healthy" or node.status == "mostly_healthy" then %}
<tr>
{% else %}
<tr bgcolor="#FF0000">
{% end %}
<td>{* i *}</td>
<td>{* stat.name *}</td>
<td>{* stat.type *}</td>
<td>{* node.ip .. ":" .. node.port *}</td>
<td>{* node.status *}</td>
<td>{* node.counter.success *}</td>
<td>{* node.counter.tcp_failure *}</td>
<td>{* node.counter.http_failure *}</td>
<td>{* node.counter.timeout_failure *}</td>
</tr>
{% end %}
{% end %}
</table>
</body>
</html>
]]
local html_render
local function try_render_html(data)
if not html_render then
local template = require("resty.template")
html_render = template.compile(HTML_TEMPLATE)
end
local accept = ngx_var.http_accept
if accept and accept:find("text/html") then
local ok, out = pcall(html_render, data)
if not ok then
local err = str_format("HTML template rendering: %s", out)
core.log.error(err)
return nil, err
end
return out
end
end
local function _get_health_checkers()
local infos = {}
local routes = get_routes()
iter_and_add_healthcheck_info(infos, routes)
local services = get_services()
iter_and_add_healthcheck_info(infos, services)
local upstreams = get_upstreams()
iter_and_add_healthcheck_info(infos, upstreams)
return infos
end
function _M.get_health_checkers()
local infos = _get_health_checkers()
local out, err = try_render_html({stats=infos})
if out then
core.response.set_header("Content-Type", "text/html")
return 200, out
end
if err then
return 503, {error_msg = err}
end
return 200, infos
end
local function iter_and_find_healthcheck_info(values, src_type, src_id)
if not values then
return nil, str_format("%s[%s] not found", src_type, src_id)
end
for _, value in core.config_util.iterate_values(values) do
if value.value.id == src_id then
local checks = value.value.checks or
(value.value.upstream and value.value.upstream.checks)
if not checks then
return nil, str_format("no checker for %s[%s]", src_type, src_id)
end
local info = extra_checker_info(value)
info.type = get_checker_type(checks)
return info
end
end
return nil, str_format("%s[%s] not found", src_type, src_id)
end
function _M.get_health_checker()
local uri_segs = core.utils.split_uri(ngx_var.uri)
core.log.info("healthcheck uri: ", core.json.delay_encode(uri_segs))
local src_type, src_id = uri_segs[4], uri_segs[5]
if not src_id then
return 404, {error_msg = str_format("missing src id for src type %s", src_type)}
end
local values
if src_type == "routes" then
values = get_routes()
elseif src_type == "services" then
values = get_services()
elseif src_type == "upstreams" then
values = get_upstreams()
else
return 400, {error_msg = str_format("invalid src type %s", src_type)}
end
local info, err = iter_and_find_healthcheck_info(values, src_type, src_id)
if not info then
return 404, {error_msg = err}
end
local out, err = try_render_html({stats={info}})
if out then
core.response.set_header("Content-Type", "text/html")
return 200, out
end
if err then
return 503, {error_msg = err}
end
return 200, info
end
local function iter_add_get_routes_info(values, route_id)
local infos = {}
for _, route in core.config_util.iterate_values(values) do
local new_route = core.table.deepcopy(route)
if new_route.value.upstream and new_route.value.upstream.parent then
new_route.value.upstream.parent = nil
end
-- remove healthcheck info
new_route.checker = nil
new_route.checker_idx = nil
new_route.checker_upstream = nil
new_route.clean_handlers = nil
core.table.insert(infos, new_route)
-- check the route id
if route_id and route.value.id == route_id then
return new_route
end
end
if not route_id then
return infos
end
return nil
end
function _M.dump_all_routes_info()
local routes = get_routes()
local infos = iter_add_get_routes_info(routes, nil)
return 200, infos
end
function _M.dump_route_info()
local routes = get_routes()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local route_id = uri_segs[4]
local route = iter_add_get_routes_info(routes, route_id)
if not route then
return 404, {error_msg = str_format("route[%s] not found", route_id)}
end
return 200, route
end
local function iter_add_get_upstream_info(values, upstream_id)
if not values then
return nil
end
local infos = {}
for _, upstream in core.config_util.iterate_values(values) do
local new_upstream = core.table.deepcopy(upstream)
core.table.insert(infos, new_upstream)
if new_upstream.value and new_upstream.value.parent then
new_upstream.value.parent = nil
end
-- check the upstream id
if upstream_id and upstream.value.id == upstream_id then
return new_upstream
end
end
if not upstream_id then
return infos
end
return nil
end
function _M.dump_all_upstreams_info()
local upstreams = get_upstreams()
local infos = iter_add_get_upstream_info(upstreams, nil)
return 200, infos
end
function _M.dump_upstream_info()
local upstreams = get_upstreams()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local upstream_id = uri_segs[4]
local upstream = iter_add_get_upstream_info(upstreams, upstream_id)
if not upstream then
return 404, {error_msg = str_format("upstream[%s] not found", upstream_id)}
end
return 200, upstream
end
function _M.trigger_gc()
-- TODO: find a way to trigger GC in the stream subsystem
collectgarbage()
return 200
end
local function iter_add_get_services_info(values, svc_id)
local infos = {}
for _, svc in core.config_util.iterate_values(values) do
local new_svc = core.table.deepcopy(svc)
if new_svc.value.upstream and new_svc.value.upstream.parent then
new_svc.value.upstream.parent = nil
end
-- remove healthcheck info
new_svc.checker = nil
new_svc.checker_idx = nil
new_svc.checker_upstream = nil
new_svc.clean_handlers = nil
core.table.insert(infos, new_svc)
-- check the service id
if svc_id and svc.value.id == svc_id then
return new_svc
end
end
if not svc_id then
return infos
end
return nil
end
function _M.dump_all_services_info()
local services = get_services()
local infos = iter_add_get_services_info(services, nil)
return 200, infos
end
function _M.dump_service_info()
local services = get_services()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local svc_id = uri_segs[4]
local info = iter_add_get_services_info(services, svc_id)
if not info then
return 404, {error_msg = str_format("service[%s] not found", svc_id)}
end
return 200, info
end
function _M.dump_all_plugin_metadata()
local names = core.config.local_conf().plugins
local metadatas = core.table.new(0, #names)
for _, name in ipairs(names) do
local metadata = plugin.plugin_metadata(name)
if metadata then
core.table.insert(metadatas, metadata.value)
end
end
return 200, metadatas
end
function _M.dump_plugin_metadata()
local uri_segs = core.utils.split_uri(ngx_var.uri)
local name = uri_segs[4]
local metadata = plugin.plugin_metadata(name)
if not metadata then
return 404, {error_msg = str_format("plugin metadata[%s] not found", name)}
end
return 200, metadata.value
end
function _M.post_reload_plugins()
local success, err = events:post(_M.RELOAD_EVENT, ngx.req.get_method(), ngx.time())
if not success then
core.response.exit(503, err)
end
core.response.exit(200, "done")
end
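-- The routes below are served by the control API (by default bound to
-- 127.0.0.1:9090 and configurable via `apisix.control` in config.yaml), e.g.:
--
--   curl http://127.0.0.1:9090/v1/healthcheck
--   curl -X PUT http://127.0.0.1:9090/v1/plugins/reload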
return {
-- /v1/schema
{
methods = {"GET"},
uris = {"/schema"},
handler = _M.schema,
},
-- /v1/healthcheck
{
methods = {"GET"},
uris = {"/healthcheck"},
handler = _M.get_health_checkers,
},
-- /v1/healthcheck/{src_type}/{src_id}
{
methods = {"GET"},
uris = {"/healthcheck/*"},
handler = _M.get_health_checker,
},
-- /v1/gc
{
methods = {"POST"},
uris = {"/gc"},
handler = _M.trigger_gc,
},
-- /v1/routes
{
methods = {"GET"},
uris = {"/routes"},
handler = _M.dump_all_routes_info,
},
-- /v1/route/*
{
methods = {"GET"},
uris = {"/route/*"},
handler = _M.dump_route_info,
},
-- /v1/services
{
methods = {"GET"},
uris = {"/services"},
handler = _M.dump_all_services_info
},
-- /v1/service/*
{
methods = {"GET"},
uris = {"/service/*"},
handler = _M.dump_service_info
},
-- /v1/upstreams
{
methods = {"GET"},
uris = {"/upstreams"},
handler = _M.dump_all_upstreams_info,
},
-- /v1/upstream/*
{
methods = {"GET"},
uris = {"/upstream/*"},
handler = _M.dump_upstream_info,
},
-- /v1/plugin_metadatas
{
methods = {"GET"},
uris = {"/plugin_metadatas"},
handler = _M.dump_all_plugin_metadata,
},
-- /v1/plugin_metadata/*
{
methods = {"GET"},
uris = {"/plugin_metadata/*"},
handler = _M.dump_plugin_metadata,
},
-- /v1/plugins/reload
{
methods = {"PUT"},
uris = {"/plugins/reload"},
handler = _M.post_reload_plugins,
},
get_health_checkers = _get_health_checkers,
reload_event = _M.RELOAD_EVENT,
}

View File

@@ -0,0 +1,68 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local log = require("apisix.core.log")
local utils = require("apisix.core.utils")
local local_conf, err = require("apisix.core.config_local").local_conf()
if not local_conf then
error("failed to parse yaml config: " .. err)
end
local config_provider = local_conf.deployment and local_conf.deployment.config_provider
or "etcd"
log.info("use config_provider: ", config_provider)
local config
-- Currently, we handle JSON parsing in config_yaml, so special processing is needed here.
if config_provider == "json" then
config = require("apisix.core.config_yaml")
config.file_type = "json"
else
config = require("apisix.core.config_" .. config_provider)
end
config.type = config_provider
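-- Typical usage (a sketch): downstream modules require this table once and
-- use its fields, e.g.:
--
--   local core = require("apisix.core")
--   core.log.info("hello")
--   local conf = core.config.local_conf()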
return {
version = require("apisix.core.version"),
log = log,
config = config,
config_util = require("apisix.core.config_util"),
sleep = utils.sleep,
json = require("apisix.core.json"),
table = require("apisix.core.table"),
request = require("apisix.core.request"),
response = require("apisix.core.response"),
lrucache = require("apisix.core.lrucache"),
schema = require("apisix.schema_def"),
string = require("apisix.core.string"),
ctx = require("apisix.core.ctx"),
timer = require("apisix.core.timer"),
id = require("apisix.core.id"),
ip = require("apisix.core.ip"),
io = require("apisix.core.io"),
utils = utils,
dns_client = require("apisix.core.dns.client"),
etcd = require("apisix.core.etcd"),
tablepool = require("tablepool"),
resolver = require("apisix.core.resolver"),
os = require("apisix.core.os"),
pubsub = require("apisix.core.pubsub"),
math = require("apisix.core.math"),
event = require("apisix.core.event"),
env = require("apisix.core.env"),
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,71 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration information.
--
-- @module core.config_local
local file = require("apisix.cli.file")
local _M = {}
local config_data
function _M.clear_cache()
config_data = nil
end
---
-- Get the local config info.
-- The configuration consists of two parts: user-defined configuration in
-- `conf/config.yaml` and default configuration in `conf/config-default.yaml`.
-- Settings of the same name in `conf/config.yaml` override those in
-- `conf/config-default.yaml`; the final full configuration is `conf/config.yaml`
-- merged with the defaults it does not override.
--
-- @function core.config_local.local_conf
-- @treturn table The configuration information.
-- @usage
-- -- Given a config item in `conf/config.yaml`:
-- --
-- -- apisix:
-- -- ssl:
-- -- fallback_sni: "a.test2.com"
-- --
-- -- you can get the value of `fallback_sni` by:
-- local local_conf = core.config.local_conf()
-- local fallback_sni = core.table.try_read_attr(
-- local_conf, "apisix", "ssl", "fallback_sni") -- "a.test2.com"
function _M.local_conf(force)
if not force and config_data then
return config_data
end
local default_conf, err = file.read_yaml_conf()
if not default_conf then
return nil, err
end
config_data = default_conf
return config_data
end
return _M

View File

@@ -0,0 +1,219 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Collection of util functions
--
-- @module core.config_util
local core_tab = require("apisix.core.table")
local log = require("apisix.core.log")
local str_byte = string.byte
local str_char = string.char
local ipairs = ipairs
local setmetatable = setmetatable
local tostring = tostring
local type = type
local _M = {}
local function _iterate_values(self, tab)
while true do
self.idx = self.idx + 1
local v = tab[self.idx]
if type(v) == "table" then
return self.idx, v
end
if v == nil then
return nil, nil
end
-- skip the tombstone
end
end
function _M.iterate_values(tab)
local iter = setmetatable({idx = 0}, {__call = _iterate_values})
return iter, tab, 0
end
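-- Illustrative usage (a sketch): iterate configuration values while skipping
-- tombstones (non-table placeholders left behind by deletions).
--
--   for _, item in _M.iterate_values(values) do
--       -- item is always a table here
--   end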
-- Add a clean handler to a runtime configuration item.
-- The clean handler will be called when the item is deleted from configuration
-- or cancelled. Note that Nginx worker exit doesn't trigger the clean handler.
-- Return an index so that we can cancel it later.
function _M.add_clean_handler(item, func)
if not item.clean_handlers then
return nil, "clean handlers for the item are nil"
end
if not item.clean_handlers._id then
item.clean_handlers._id = 1
end
local id = item.clean_handlers._id
item.clean_handlers._id = item.clean_handlers._id + 1
core_tab.insert(item.clean_handlers, {f = func, id = id})
return id
end
-- cancel a clean handler added by add_clean_handler.
-- If `fire` is true, call the clean handler.
function _M.cancel_clean_handler(item, idx, fire)
local pos, f
-- the number of pending clean handlers is small so we can cancel them in O(n)
for i, clean_handler in ipairs(item.clean_handlers) do
if clean_handler.id == idx then
pos = i
f = clean_handler.f
break
end
end
if not pos then
log.error("failed to find clean_handler with idx ", idx)
return
end
core_tab.remove(item.clean_handlers, pos)
if not fire then
return
end
if f then
f(item)
else
log.error("The function used to clear the health checker is nil, please check")
end
end
-- fire all clean handlers added by add_clean_handler.
function _M.fire_all_clean_handlers(item)
-- When the key is deleted, the item will be set to false.
if not item then
return
end
if not item.clean_handlers then
return
end
for _, clean_handler in ipairs(item.clean_handlers) do
clean_handler.f(item)
end
item.clean_handlers = {}
end
---
-- Convert different time units to seconds as time units.
-- Time intervals can be specified in milliseconds, seconds, minutes, hours, days and so on,
-- using the following suffixes:
-- ms milliseconds
-- s seconds
-- m minutes
-- h hours
-- d days
-- w weeks
-- M months, 30 days
-- y years, 365 days
-- Multiple units can be combined in a single value by specifying them in the order from the most
-- to the least significant, and optionally separated by whitespace.
-- A value without a suffix means seconds.
--
-- @function core.config_util.parse_time_unit
-- @tparam number|string s Strings with time units, e.g. "60m".
-- @treturn number Number of seconds after conversion
-- @usage
-- local seconds = core.config_util.parse_time_unit("60m") -- 3600
function _M.parse_time_unit(s)
local typ = type(s)
if typ == "number" then
return s
end
if typ ~= "string" or #s == 0 then
return nil, "invalid data: " .. tostring(s)
end
local size = 0
local size_in_unit = 0
local step = 60 * 60 * 24 * 365
local with_ms = false
for i = 1, #s do
local scale
local unit = str_byte(s, i)
if unit == 121 then -- y
scale = 60 * 60 * 24 * 365
elseif unit == 77 then -- M
scale = 60 * 60 * 24 * 30
elseif unit == 119 then -- w
scale = 60 * 60 * 24 * 7
elseif unit == 100 then -- d
scale = 60 * 60 * 24
elseif unit == 104 then -- h
scale = 60 * 60
elseif unit == 109 then -- m
unit = str_byte(s, i + 1)
if unit == 115 then -- ms
size = size * 1000
with_ms = true
step = 0
break
end
scale = 60
elseif unit == 115 then -- s
scale = 1
elseif 48 <= unit and unit <= 57 then
size_in_unit = size_in_unit * 10 + unit - 48
elseif unit ~= 32 then
return nil, "invalid data: " .. str_char(unit)
end
if scale ~= nil then
if scale > step then
return nil, "unexpected unit: " .. str_char(unit)
end
step = scale
size = size + scale * size_in_unit
size_in_unit = 0
end
end
if size_in_unit > 0 then
if step == 1 then
return nil, "specific unit conflicts with the default unit second"
end
size = size + size_in_unit
end
if with_ms then
size = size / 1000
end
return size
end
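-- More examples of combined units:
--   _M.parse_time_unit("1h30m")  --> 5400
--   _M.parse_time_unit("500ms")  --> 0.5
--   _M.parse_time_unit(10)       --> 10 (numbers pass through unchanged)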
return _M

View File

@@ -0,0 +1,378 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration from ngx.shared.DICT
--
-- @module core.config_xds
local config_local = require("apisix.core.config_local")
local config_util = require("apisix.core.config_util")
local string = require("apisix.core.string")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local os = require("apisix.core.os")
local ngx_sleep = require("apisix.core.utils").sleep
local check_schema = require("apisix.core.schema").check
local new_tab = require("table.new")
local table = table
local insert_tab = table.insert
local error = error
local pcall = pcall
local tostring = tostring
local setmetatable = setmetatable
local io = io
local io_open = io.open
local io_close = io.close
local package = package
local ipairs = ipairs
local type = type
local sub_str = string.sub
local ffi = require ("ffi")
local C = ffi.C
local config = ngx.shared["xds-config"]
local conf_ver = ngx.shared["xds-config-version"]
local is_http = ngx.config.subsystem == "http"
local ngx_re_match = ngx.re.match
local ngx_re_gmatch = ngx.re.gmatch
local ngx_timer_every = ngx.timer.every
local ngx_timer_at = ngx.timer.at
local exiting = ngx.worker.exiting
local ngx_time = ngx.time
local xds_lib_name = "libxds.so"
local process
if is_http then
process = require("ngx.process")
end
local shdict_udata_to_zone
if not pcall(function() return C.ngx_http_lua_ffi_shdict_udata_to_zone end) then
shdict_udata_to_zone = C.ngx_meta_lua_ffi_shdict_udata_to_zone
else
shdict_udata_to_zone = C.ngx_http_lua_ffi_shdict_udata_to_zone
end
ffi.cdef[[
extern void initial(void* config_zone, void* version_zone);
]]
local created_obj = {}
local _M = {
version = 0.1,
local_conf = config_local.local_conf,
}
local mt = {
__index = _M,
__tostring = function(self)
return " xds key: " .. self.key
end
}
-- todo: refactor this function in chash.lua and radixtree.lua
local function load_shared_lib(lib_name)
local cpath = package.cpath
local tried_paths = new_tab(32, 0)
local i = 1
local iter, err = ngx_re_gmatch(cpath, "[^;]+", "jo")
if not iter then
error("failed to gmatch: " .. err)
end
while true do
local it = iter()
-- check for exhaustion before indexing: iter() returns nil
-- once there are no more cpath entries to try
if not it then
break
end
local fpath
fpath, err = ngx_re_match(it[0], "(.*/)", "jo")
if err then
error("failed to match: " .. err)
end
local spath = fpath[0] .. lib_name
local f = io_open(spath)
if f ~= nil then
io_close(f)
return ffi.load(spath)
end
tried_paths[i] = spath
i = i + 1
end
return nil, tried_paths
end
local function load_libxds(lib_name)
local xdsagent, tried_paths = load_shared_lib(lib_name)
if not xdsagent then
tried_paths[#tried_paths + 1] = 'tried above paths but can not load ' .. lib_name
error("can not load xds library, tried paths: " ..
table.concat(tried_paths, '\r\n', 1, #tried_paths))
end
local config_zone = shdict_udata_to_zone(config[1])
local config_shd_cdata = ffi.cast("void*", config_zone)
local conf_ver_zone = shdict_udata_to_zone(conf_ver[1])
local conf_ver_shd_cdata = ffi.cast("void*", conf_ver_zone)
xdsagent.initial(config_shd_cdata, conf_ver_shd_cdata)
end
local latest_version
local function sync_data(self)
if self.conf_version == latest_version then
return true
end
if self.values then
for _, val in ipairs(self.values) do
config_util.fire_all_clean_handlers(val)
end
self.values = nil
self.values_hash = nil
end
local keys = config:get_keys(0)
if not keys or #keys <= 0 then
-- xds did not write any data to shdict
return false, "no keys"
end
self.values = new_tab(#keys, 0)
self.values_hash = new_tab(0, #keys)
for _, key in ipairs(keys) do
if string.has_prefix(key, self.key) then
local data_valid = true
local conf_str = config:get(key, 0)
local conf, err = json.decode(conf_str)
if not conf then
data_valid = false
log.error("decode the conf of [", key, "] failed, err: ", err,
", conf_str: ", conf_str)
end
if not self.single_item and type(conf) ~= "table" then
data_valid = false
log.error("invalid conf of [", key, "], conf: ", conf,
", it should be an object")
end
if data_valid and self.item_schema then
local ok, err = check_schema(self.item_schema, conf)
if not ok then
data_valid = false
log.error("failed to check the conf of [", key, "] err:", err)
end
end
if data_valid and self.checker then
local ok, err = self.checker(conf)
if not ok then
data_valid = false
log.error("failed to check the conf of [", key, "] err:", err)
end
end
if data_valid then
if not conf.id then
conf.id = sub_str(key, #self.key + 2, #key + 1)
log.warn("the id of [", key, "] is nil, use the id: ", conf.id)
end
local conf_item = {value = conf, modifiedIndex = latest_version,
key = key}
insert_tab(self.values, conf_item)
self.values_hash[conf.id] = #self.values
conf_item.clean_handlers = {}
if self.filter then
self.filter(conf_item)
end
end
end
end
self.conf_version = latest_version
return true
end
local function _automatic_fetch(premature, self)
if premature then
return
end
local i = 0
while not exiting() and self.running and i <= 32 do
i = i + 1
local ok, ok2, err = pcall(sync_data, self)
if not ok then
err = ok2
log.error("failed to fetch data from xds: ",
err, ", ", tostring(self))
ngx_sleep(3)
break
elseif not ok2 and err then
-- TODO: handle other errors
if err ~= "wait for more time" and err ~= "no keys" and self.last_err ~= err then
log.error("failed to fetch data from xds, ", err, ", ", tostring(self))
end
if err ~= self.last_err then
self.last_err = err
self.last_err_time = ngx_time()
else
if ngx_time() - self.last_err_time >= 30 then
self.last_err = nil
end
end
ngx_sleep(0.5)
elseif not ok2 then
ngx_sleep(0.05)
else
ngx_sleep(0.1)
end
end
if not exiting() and self.running then
ngx_timer_at(0, _automatic_fetch, self)
end
end
local function fetch_version(premature)
if premature then
return
end
local version = conf_ver:get("version")
if not version then
return
end
if version ~= latest_version then
latest_version = version
end
end
function _M.new(key, opts)
local automatic = opts and opts.automatic
local item_schema = opts and opts.item_schema
local filter_fun = opts and opts.filter
local single_item = opts and opts.single_item
local checker = opts and opts.checker
local obj = setmetatable({
automatic = automatic,
item_schema = item_schema,
checker = checker,
sync_times = 0,
running = true,
conf_version = 0,
values = nil,
routes_hash = nil,
prev_index = nil,
last_err = nil,
last_err_time = nil,
key = key,
single_item = single_item,
filter = filter_fun,
}, mt)
if automatic then
if not key then
return nil, "missing `key` argument"
end
-- blocking until xds completes initial configuration
while true do
os.usleep(1000)
fetch_version()
if latest_version then
break
end
end
local ok, ok2, err = pcall(sync_data, obj)
if not ok then
err = ok2
end
if err then
log.error("failed to fetch data from xds ",
err, ", ", key)
end
ngx_timer_at(0, _automatic_fetch, obj)
end
if key then
created_obj[key] = obj
end
return obj
end
function _M.get(self, key)
if not self.values_hash then
return
end
local arr_idx = self.values_hash[tostring(key)]
if not arr_idx then
return nil
end
return self.values[arr_idx]
end
function _M.fetch_created_obj(key)
return created_obj[key]
end
function _M.init_worker()
if process.type() == "privileged agent" then
load_libxds(xds_lib_name)
end
ngx_timer_every(1, fetch_version)
return true
end
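-- A hedged usage sketch (module path, key and options assumed, mirroring the
-- other config_* providers):
--
--   local config_xds = require("apisix.core.config_xds")
--   local routes, err = config_xds.new("/routes", {automatic = true})
--   if not routes then
--       log.error("failed to create xds config provider: ", err)
--   end
--   local route = routes and routes:get("1")  -- look up one item by id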
return _M

View File

@@ -0,0 +1,579 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get configuration information in Stand-alone mode.
--
-- @module core.config_yaml
local config_local = require("apisix.core.config_local")
local config_util = require("apisix.core.config_util")
local yaml = require("lyaml")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local new_tab = require("table.new")
local check_schema = require("apisix.core.schema").check
local profile = require("apisix.core.profile")
local lfs = require("lfs")
local file = require("apisix.cli.file")
local exiting = ngx.worker.exiting
local insert_tab = table.insert
local type = type
local ipairs = ipairs
local setmetatable = setmetatable
local ngx_sleep = require("apisix.core.utils").sleep
local ngx_timer_at = ngx.timer.at
local ngx_time = ngx.time
local ngx_shared = ngx.shared
local sub_str = string.sub
local tostring = tostring
local pcall = pcall
local io = io
local ngx = ngx
local re_find = ngx.re.find
local process = require("ngx.process")
local worker_id = ngx.worker.id
local created_obj = {}
local shared_dict
local status_report_shared_dict_name = "status-report"
local _M = {
version = 0.2,
local_conf = config_local.local_conf,
clear_local_cache = config_local.clear_cache,
-- yaml or json
file_type = "yaml",
ERR_NO_SHARED_DICT = "failed prepare standalone config shared dict, this will degrade "..
"to event broadcasting, and if a worker crashes, the configuration "..
"cannot be restored from other workers and shared dict"
}
local mt = {
__index = _M,
__tostring = function(self)
return "apisix.yaml key: " .. (self.key or "")
end
}
local apisix_yaml
local apisix_yaml_mtime
local config_yaml = {
path = profile:yaml_path("apisix"),
type = "yaml",
parse = function(self)
local f, err = io.open(self.path, "r")
if not f then
return nil, "failed to open file " .. self.path .. " : " .. err
end
f:seek('end', -10)
local end_flag = f:read("*a")
local found_end_flag = re_find(end_flag, [[#END\s*$]], "jo")
if not found_end_flag then
f:close()
return nil, "missing valid end flag in file " .. self.path
end
f:seek('set')
local raw_config = f:read("*a")
f:close()
return yaml.load(raw_config), nil
end
}
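-- For illustration, a minimal conf/apisix.yaml the parser above accepts
-- (route content assumed); the trailing `#END` marker is mandatory, since
-- parse() inspects the last 10 bytes and refuses files without it, which
-- guards against reading a partially written file:
--
--   routes:
--     - uri: /hello
--       upstream:
--         nodes:
--           "127.0.0.1:1980": 1
--         type: roundrobin
--   #END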
local config_json = {
-- `-5` to remove the "yaml" suffix
path = config_yaml.path:sub(1, -5) .. "json",
type = "json",
parse = function(self)
local f, err = io.open(self.path, "r")
if not f then
return nil, "failed to open file " .. self.path .. " : " .. err
end
local raw_config = f:read("*a")
f:close()
local config, err = json.decode(raw_config)
if err then
return nil, "failed to decode json: " .. err
end
return config, nil
end
}
local config_file_table = {
yaml = config_yaml,
json = config_json
}
local config_file = setmetatable({}, {
__index = function(_, key)
return config_file_table[_M.file_type][key]
end
})
local function sync_status_to_shdict(status)
if process.type() ~= "worker" then
return
end
local status_shdict = ngx.shared[status_report_shared_dict_name]
if not status_shdict then
return
end
local id = worker_id()
log.info("sync status to shared dict, id: ", id, " status: ", status)
status_shdict:set(id, status)
end
local function update_config(table, conf_version)
if not table then
log.error("failed update config: empty table")
return
end
local ok, err = file.resolve_conf_var(table)
if not ok then
log.error("failed to resolve variables:" .. err)
return
end
apisix_yaml = table
sync_status_to_shdict(true)
apisix_yaml_mtime = conf_version
end
_M._update_config = update_config
local function is_use_admin_api()
local local_conf, _ = config_local.local_conf()
return local_conf and local_conf.apisix and local_conf.apisix.enable_admin
end
local function read_apisix_config(premature, pre_mtime)
if premature then
return
end
local attributes, err = lfs.attributes(config_file.path)
if not attributes then
log.error("failed to fetch ", config_file.path, " attributes: ", err)
return
end
local last_modification_time = attributes.modification
if apisix_yaml_mtime == last_modification_time then
return
end
local config_new, err = config_file:parse()
if err then
log.error("failed to parse the content of file ", config_file.path, ": ", err)
return
end
update_config(config_new, last_modification_time)
log.warn("config file ", config_file.path, " reloaded.")
end
local function sync_data(self)
if not self.key then
return nil, "missing 'key' arguments"
end
local conf_version
if is_use_admin_api() then
conf_version = apisix_yaml[self.conf_version_key] or 0
else
if not apisix_yaml_mtime then
log.warn("wait for more time")
return nil, "failed to read local file " .. config_file.path
end
conf_version = apisix_yaml_mtime
end
if not conf_version or conf_version == self.conf_version then
return true
end
local items = apisix_yaml[self.key]
if not items then
self.values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
self.conf_version = conf_version
return true
end
if self.values and #self.values > 0 then
if is_use_admin_api() then
-- filter self.values to retain only those whose IDs exist in the new items list.
local exist_values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
local exist_items = {}
for _, item in ipairs(items) do
exist_items[tostring(item.id)] = true
end
-- remove objects that exist in the self.values but do not exist in the new items.
-- for removed items, trigger cleanup handlers.
for _, item in ipairs(self.values) do
local id = item.value.id
if not exist_items[id] then
config_util.fire_all_clean_handlers(item)
else
insert_tab(exist_values, item)
self.values_hash[id] = #exist_values
end
end
self.values = exist_values
else
for _, item in ipairs(self.values) do
config_util.fire_all_clean_handlers(item)
end
self.values = nil
end
end
if self.single_item then
-- treat items as a single item
self.values = new_tab(1, 0)
self.values_hash = new_tab(0, 1)
local item = items
local modifiedIndex = item.modifiedIndex or conf_version
local conf_item = {value = item, modifiedIndex = modifiedIndex,
key = "/" .. self.key}
local data_valid = true
local err
if self.item_schema then
data_valid, err = check_schema(self.item_schema, item)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
if data_valid and self.checker then
-- TODO: An opts table should be used
-- as different checkers may use different parameters
data_valid, err = self.checker(item, conf_item.key)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
end
if data_valid then
insert_tab(self.values, conf_item)
self.values_hash[self.key] = #self.values
conf_item.clean_handlers = {}
if self.filter then
self.filter(conf_item)
end
end
else
if not self.values then
self.values = new_tab(8, 0)
self.values_hash = new_tab(0, 8)
end
local err
for i, item in ipairs(items) do
local idx = tostring(i)
local data_valid = true
if type(item) ~= "table" then
data_valid = false
log.error("invalid item data of [", self.key .. "/" .. idx,
"], val: ", json.delay_encode(item),
", it should be an object")
end
local id = item.id or item.username or ("arr_" .. idx)
local modifiedIndex = item.modifiedIndex or conf_version
local conf_item = {value = item, modifiedIndex = modifiedIndex,
key = "/" .. self.key .. "/" .. id}
if data_valid and self.item_schema then
data_valid, err = check_schema(self.item_schema, item)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
if data_valid and self.checker then
data_valid, err = self.checker(item, conf_item.key)
if not data_valid then
log.error("failed to check item data of [", self.key,
"] err:", err, " ,val: ", json.delay_encode(item))
end
end
if data_valid then
local item_id = tostring(id)
local pre_index = self.values_hash[item_id]
if pre_index then
-- remove the old item
local pre_val = self.values[pre_index]
if pre_val and
(not item.modifiedIndex or pre_val.modifiedIndex ~= item.modifiedIndex) then
config_util.fire_all_clean_handlers(pre_val)
self.values[pre_index] = conf_item
conf_item.value.id = item_id
conf_item.clean_handlers = {}
end
else
insert_tab(self.values, conf_item)
self.values_hash[item_id] = #self.values
conf_item.value.id = item_id
conf_item.clean_handlers = {}
end
if self.filter then
self.filter(conf_item)
end
end
end
end
self.conf_version = conf_version
return true
end
function _M.get(self, key)
if not self.values_hash then
return
end
local arr_idx = self.values_hash[tostring(key)]
if not arr_idx then
return nil
end
return self.values[arr_idx]
end
local function _automatic_fetch(premature, self)
if premature then
return
end
-- the _automatic_fetch is only called in the timer, and according to the
-- documentation, ngx.shared.DICT.get can be executed there.
-- if the file's global variables have not yet been assigned values,
-- we can assume that the worker has not been initialized yet and try to
-- read any old data that may be present from the shared dict
-- try load from shared dict only on first startup, otherwise use event mechanism
if is_use_admin_api() and not shared_dict then
log.info("try to load config from shared dict")
local config, err
shared_dict = ngx_shared["standalone-config"] -- init shared dict in current worker
if not shared_dict then
log.error("failed to read config from shared dict: shared dict not found")
goto SKIP_SHARED_DICT
end
config, err = shared_dict:get("config")
if not config then
if err then -- if the key does not exist, the return values are both nil
log.error("failed to read config from shared dict: ", err)
end
log.info("no config found in shared dict")
goto SKIP_SHARED_DICT
end
log.info("startup config loaded from shared dict: ", config)
config, err = json.decode(tostring(config))
if not config then
log.error("failed to decode config from shared dict: ", err)
goto SKIP_SHARED_DICT
end
_M._update_config(config)
log.info("config loaded from shared dict")
::SKIP_SHARED_DICT::
if not shared_dict then
log.crit(_M.ERR_NO_SHARED_DICT)
-- set a sentinel value so the worker won't try the shared dict again
shared_dict = "error"
end
end
local i = 0
while not exiting() and self.running and i <= 32 do
i = i + 1
local ok, ok2, err = pcall(sync_data, self)
if not ok then
err = ok2
log.error("failed to fetch data from local file " .. config_file.path .. ": ",
err, ", ", tostring(self))
ngx_sleep(3)
break
elseif not ok2 and err then
if err ~= "timeout" and err ~= "Key not found"
and self.last_err ~= err then
log.error("failed to fetch data from local file " .. config_file.path .. ": ",
err, ", ", tostring(self))
end
if err ~= self.last_err then
self.last_err = err
self.last_err_time = ngx_time()
else
if ngx_time() - self.last_err_time >= 30 then
self.last_err = nil
end
end
ngx_sleep(0.5)
elseif not ok2 then
ngx_sleep(0.05)
else
ngx_sleep(0.1)
end
end
if not exiting() and self.running then
ngx_timer_at(0, _automatic_fetch, self)
end
end
function _M.new(key, opts)
local local_conf, err = config_local.local_conf()
if not local_conf then
return nil, err
end
local automatic = opts and opts.automatic
local item_schema = opts and opts.item_schema
local filter_fun = opts and opts.filter
local single_item = opts and opts.single_item
local checker = opts and opts.checker
-- like /routes and /upstreams, remove first char `/`
if key then
key = sub_str(key, 2)
end
local obj = setmetatable({
automatic = automatic,
item_schema = item_schema,
checker = checker,
sync_times = 0,
running = true,
conf_version = 0,
values = nil,
routes_hash = nil,
prev_index = nil,
last_err = nil,
last_err_time = nil,
key = key,
conf_version_key = key and key .. "_conf_version",
single_item = single_item,
filter = filter_fun,
}, mt)
if automatic then
if not key then
return nil, "missing `key` argument"
end
local ok, ok2, err = pcall(sync_data, obj)
if not ok then
err = ok2
end
if err then
log.error("failed to fetch data from local file ", config_file.path, ": ",
err, ", ", key)
end
ngx_timer_at(0, _automatic_fetch, obj)
end
if key then
created_obj[key] = obj
end
return obj
end
function _M.close(self)
self.running = false
end
function _M.server_version(self)
return "apisix.yaml " .. _M.version
end
function _M.fetch_created_obj(key)
return created_obj[sub_str(key, 2)]
end
function _M.fetch_all_created_obj()
return created_obj
end
function _M.init()
if is_use_admin_api() then
return true
end
read_apisix_config()
return true
end
function _M.init_worker()
sync_status_to_shdict(false)
if is_use_admin_api() then
apisix_yaml = {}
apisix_yaml_mtime = 0
return true
end
-- sync data in each non-master process
ngx.timer.every(1, read_apisix_config)
return true
end
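-- A hedged usage sketch (key assumed): items declared under `upstreams:` in
-- the configuration file become config items addressable by id:
--
--   local config_yaml = require("apisix.core.config_yaml")
--   local upstreams, err = config_yaml.new("/upstreams", {automatic = true})
--   local up = upstreams and upstreams:get("1")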
return _M

View File

@@ -0,0 +1,463 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Define the request context.
--
-- @module core.ctx
local core_str = require("apisix.core.string")
local core_tab = require("apisix.core.table")
local request = require("apisix.core.request")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local config_local = require("apisix.core.config_local")
local tablepool = require("tablepool")
local get_var = require("resty.ngxvar").fetch
local get_request = require("resty.ngxvar").request
local ck = require "resty.cookie"
local multipart = require("multipart")
local util = require("apisix.cli.util")
local gq_parse = require("graphql").parse
local jp = require("jsonpath")
local setmetatable = setmetatable
local sub_str = string.sub
local ngx = ngx
local ngx_var = ngx.var
local re_gsub = ngx.re.gsub
local ipairs = ipairs
local type = type
local error = error
local pcall = pcall
local _M = {version = 0.2}
local GRAPHQL_DEFAULT_MAX_SIZE = 1048576 -- 1MiB
local GRAPHQL_REQ_DATA_KEY = "query"
local GRAPHQL_REQ_METHOD_HTTP_GET = "GET"
local GRAPHQL_REQ_METHOD_HTTP_POST = "POST"
local GRAPHQL_REQ_MIME_JSON = "application/json"
local fetch_graphql_data = {
[GRAPHQL_REQ_METHOD_HTTP_GET] = function(ctx, max_size)
local body = request.get_uri_args(ctx)[GRAPHQL_REQ_DATA_KEY]
if not body then
return nil, "failed to read graphql data, args[" ..
GRAPHQL_REQ_DATA_KEY .. "] is nil"
end
if type(body) == "table" then
body = body[1]
end
return body
end,
[GRAPHQL_REQ_METHOD_HTTP_POST] = function(ctx, max_size)
local body, err = request.get_body(max_size, ctx)
if not body then
return nil, "failed to read graphql data, " .. (err or "request body has zero size")
end
if request.header(ctx, "Content-Type") == GRAPHQL_REQ_MIME_JSON then
local res
res, err = json.decode(body)
if not res then
return nil, "failed to read graphql data, " .. err
end
if not res[GRAPHQL_REQ_DATA_KEY] then
return nil, "failed to read graphql data, json body[" ..
GRAPHQL_REQ_DATA_KEY .. "] is nil"
end
body = res[GRAPHQL_REQ_DATA_KEY]
end
return body
end
}
local function parse_graphql(ctx)
local local_conf, err = config_local.local_conf()
if not local_conf then
return nil, "failed to get local conf: " .. err
end
local max_size = GRAPHQL_DEFAULT_MAX_SIZE
local size = core_tab.try_read_attr(local_conf, "graphql", "max_size")
if size then
max_size = size
end
local method = request.get_method()
local func = fetch_graphql_data[method]
if not func then
return nil, "graphql not support `" .. method .. "` request"
end
local body
body, err = func(ctx, max_size)
if not body then
return nil, err
end
local ok, res = pcall(gq_parse, body)
if not ok then
return nil, "failed to parse graphql: " .. res .. " body: " .. body
end
if #res.definitions == 0 then
return nil, "empty graphql: " .. body
end
return res
end
local function get_parsed_graphql()
local ctx = ngx.ctx.api_ctx
if ctx._graphql then
return ctx._graphql
end
local res, err = parse_graphql(ctx)
if not res then
log.error(err)
ctx._graphql = {}
return ctx._graphql
end
if #res.definitions > 1 then
log.warn("Multiple operations are not supported.",
"Only the first one is handled")
end
local def = res.definitions[1]
local fields = def.selectionSet.selections
local root_fields = core_tab.new(#fields, 0)
for i, f in ipairs(fields) do
root_fields[i] = f.name.value
end
local name = ""
if def.name and def.name.value then
name = def.name.value
end
ctx._graphql = {
name = name,
operation = def.operation,
root_fields = root_fields,
}
return ctx._graphql
end
local CONTENT_TYPE_JSON = "application/json"
local CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
local CONTENT_TYPE_MULTIPART_FORM = "multipart/form-data"
local function get_parsed_request_body(ctx)
local ct_header = request.header(ctx, "Content-Type") or ""
if core_str.find(ct_header, CONTENT_TYPE_JSON) then
local request_table, err = request.get_json_request_body_table()
if not request_table then
return nil, "failed to parse JSON body: " .. err
end
return request_table
end
if core_str.find(ct_header, CONTENT_TYPE_FORM_URLENCODED) then
local args, err = request.get_post_args()
if not args then
return nil, "failed to parse form data: " .. (err or "unknown error")
end
return args
end
if core_str.find(ct_header, CONTENT_TYPE_MULTIPART_FORM) then
local body = request.get_body()
local res = multipart(body, ct_header)
if not res then
return nil, "failed to parse multipart form data"
end
return res:get_all()
end
local err = "unsupported content-type in header: " .. ct_header ..
", supported types are: " ..
CONTENT_TYPE_JSON .. ", " ..
CONTENT_TYPE_FORM_URLENCODED .. ", " ..
CONTENT_TYPE_MULTIPART_FORM
return nil, err
end
do
local var_methods = {
method = ngx.req.get_method,
cookie = function ()
if ngx.var.http_cookie then
return ck:new()
end
end
}
local no_cacheable_var_names = {
-- var.args should not be cached as it can be changed via set_uri_args
args = true,
is_args = true,
}
local ngx_var_names = {
upstream_scheme = true,
upstream_host = true,
upstream_upgrade = true,
upstream_connection = true,
upstream_uri = true,
upstream_mirror_host = true,
upstream_mirror_uri = true,
upstream_cache_zone = true,
upstream_cache_zone_info = true,
upstream_no_cache = true,
upstream_cache_key = true,
upstream_cache_bypass = true,
var_x_forwarded_proto = true,
var_x_forwarded_port = true,
var_x_forwarded_host = true,
}
-- sorted alphabetically
local apisix_var_names = {
balancer_ip = true,
balancer_port = true,
consumer_group_id = true,
consumer_name = true,
resp_body = function(ctx)
-- only for logger and requires the logger to have a special configuration
return ctx.resp_body or ''
end,
route_id = true,
route_name = true,
service_id = true,
service_name = true,
}
local mt = {
__index = function(t, key)
local cached = t._cache[key]
if cached ~= nil then
log.debug("serving ctx value from cache for key: ", key)
return cached
end
if type(key) ~= "string" then
error("invalid argument, expect string value", 2)
end
local val
local method = var_methods[key]
if method then
val = method()
elseif core_str.has_prefix(key, "cookie_") then
local cookie = t.cookie
if cookie then
local err
val, err = cookie:get(sub_str(key, 8))
if err then
log.warn("failed to fetch cookie value by key: ",
key, " error: ", err)
end
end
elseif core_str.has_prefix(key, "arg_") then
local arg_key = sub_str(key, 5)
local args = request.get_uri_args()[arg_key]
if args then
if type(args) == "table" then
val = args[1]
else
val = args
end
end
elseif core_str.has_prefix(key, "post_arg_") then
-- only match default post form
local content_type = request.header(nil, "Content-Type")
if content_type ~= nil and core_str.has_prefix(content_type,
"application/x-www-form-urlencoded") then
local arg_key = sub_str(key, 10)
local args = request.get_post_args()[arg_key]
if args then
if type(args) == "table" then
val = args[1]
else
val = args
end
end
end
elseif core_str.has_prefix(key, "uri_param_") then
-- `uri_param_<name>` provides access to the uri parameters when using
-- radixtree_uri_with_parameter
if t._ctx.curr_req_matched then
local arg_key = sub_str(key, 11)
val = t._ctx.curr_req_matched[arg_key]
end
elseif core_str.has_prefix(key, "http_") then
local arg_key = key:lower()
arg_key = re_gsub(arg_key, "-", "_", "jo")
val = get_var(arg_key, t._request)
elseif core_str.has_prefix(key, "graphql_") then
-- trim the "graphql_" prefix
local arg_key = sub_str(key, 9)
val = get_parsed_graphql()[arg_key]
elseif core_str.has_prefix(key, "post_arg.") then
-- trim the "post_arg." prefix (10 characters)
local arg_key = sub_str(key, 10)
local parsed_body, err = get_parsed_request_body(t._ctx)
if not parsed_body then
log.warn("failed to fetch post args value by key: ", arg_key, " error: ", err)
return nil
end
if arg_key:find("[%[%*]") or arg_key:find("..", 1, true) then
arg_key = "$." .. arg_key
local results = jp.query(parsed_body, arg_key)
if #results == 0 then
val = nil
else
val = results
end
else
local parts = util.split(arg_key, "(.)")
local current = parsed_body
for _, part in ipairs(parts) do
if type(current) ~= "table" then
current = nil
break
end
current = current[part]
end
val = current
end
else
local getter = apisix_var_names[key]
if getter then
local ctx = t._ctx
if getter == true then
val = ctx and ctx[key]
else
-- the getter is registered by ctx.register_var
val = getter(ctx)
end
else
val = get_var(key, t._request)
end
end
if val ~= nil and not no_cacheable_var_names[key] then
t._cache[key] = val
end
return val
end,
__newindex = function(t, key, val)
if ngx_var_names[key] then
ngx_var[key] = val
end
-- log.info("key: ", key, " new val: ", val)
t._cache[key] = val
end,
}
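-- Illustrative lookups resolved by the metatable above (values assumed):
--
--   ctx.var.cookie_session          -- cookie named "session"
--   ctx.var.arg_id                  -- query argument "id" (first value)
--   ctx.var.post_arg_name           -- urlencoded form field "name"
--   ctx.var["post_arg.user.name"]   -- body field via dotted path/JSONPath
--   ctx.var.http_x_request_id      -- request header X-Request-ID
--   ctx.var.graphql_name            -- name of the parsed GraphQL operation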
---
-- Register custom variables.
-- Register variables globally, and use them as normal builtin variables.
-- Note that the custom variables can't be used in features that depend
-- on the Nginx directive, like `access_log_format`.
--
-- @function core.ctx.register_var
-- @tparam string name custom variable name
-- @tparam function getter The fetch function for custom variables.
-- @tparam table opts An optional options table which controls the behavior about the variable
-- @usage
-- local core = require "apisix.core"
--
-- core.ctx.register_var("a6_labels_zone", function(ctx)
-- local route = ctx.matched_route and ctx.matched_route.value
-- if route and route.labels then
-- return route.labels.zone
-- end
-- return nil
-- end)
--
-- We support the options below in the `opts`:
-- * no_cacheable: if the result of getter is cacheable or not. Default to `false`.
function _M.register_var(name, getter, opts)
if type(getter) ~= "function" then
error("the getter of registered var should be a function")
end
apisix_var_names[name] = getter
if opts then
if opts.no_cacheable then
no_cacheable_var_names[name] = true
end
end
end
function _M.set_vars_meta(ctx)
local var = tablepool.fetch("ctx_var", 0, 32)
if not var._cache then
var._cache = {}
end
var._request = get_request()
var._ctx = ctx
setmetatable(var, mt)
ctx.var = var
end
function _M.release_vars(ctx)
if ctx.var == nil then
return
end
core_tab.clear(ctx.var._cache)
tablepool.release("ctx_var", ctx.var, true)
ctx.var = nil
end
end -- do
return _M

View File

@@ -0,0 +1,164 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped dns search client.
--
-- @module core.dns.client
local require = require
local config_local = require("apisix.core.config_local")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local table = require("apisix.core.table")
local gcd = require("apisix.core.math").gcd
local insert_tab = table.insert
local math_random = math.random
local package_loaded = package.loaded
local ipairs = ipairs
local table_remove = table.remove
local setmetatable = setmetatable
local _M = {
RETURN_RANDOM = 1,
RETURN_ALL = 2,
}
local function resolve_srv(client, answers)
if #answers == 0 then
return nil, "empty SRV record"
end
local resolved_answers = {}
local answer_to_count = {}
for _, answer in ipairs(answers) do
if answer.type ~= client.TYPE_SRV then
return nil, "mess SRV with other record"
end
local resolved, err = client.resolve(answer.target)
if not resolved then
local msg = "failed to resolve SRV record " .. answer.target .. ": " .. err
return nil, msg
end
log.info("dns resolve SRV ", answer.target, ", result: ",
json.delay_encode(resolved))
local weight = answer.weight
if weight == 0 then
weight = 1
end
local count = #resolved
answer_to_count[answer] = count
-- one target may have multiple resolved results
for _, res in ipairs(resolved) do
local copy = table.deepcopy(res)
copy.weight = weight / count
copy.port = answer.port
copy.priority = answer.priority
insert_tab(resolved_answers, copy)
end
end
-- find the least common multiple of the counts
local lcm = answer_to_count[answers[1]]
for i = 2, #answers do
local count = answer_to_count[answers[i]]
lcm = count * lcm / gcd(count, lcm)
end
-- fix the weight as the weight should be integer
for _, res in ipairs(resolved_answers) do
res.weight = res.weight * lcm
end
return resolved_answers
end
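-- Worked example (numbers assumed): two SRV targets, each with weight 10.
-- Target a resolves to 2 IPs (weight 10/2 = 5 each) and target b to 3 IPs
-- (weight 10/3 each). The least common multiple of the counts is 6, so the
-- final integer weights are 5 * 6 = 30 and (10/3) * 6 = 20, giving each
-- target a total of 60 and preserving the original 1:1 ratio.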
function _M.resolve(self, domain, selector)
local client = self.client
-- this function will dereference the CNAME records
local answers, err = client.resolve(domain)
if not answers then
return nil, "failed to query the DNS server: " .. err
end
if answers.errcode then
return nil, "server returned error code: " .. answers.errcode
.. ": " .. answers.errstr
end
if selector == _M.RETURN_ALL then
log.info("dns resolve ", domain, ", result: ", json.delay_encode(answers))
for _, answer in ipairs(answers) do
if answer.type == client.TYPE_SRV then
return resolve_srv(client, answers)
end
end
return table.deepcopy(answers)
end
local idx = math_random(1, #answers)
local answer = answers[idx]
local dns_type = answer.type
if dns_type == client.TYPE_A or dns_type == client.TYPE_AAAA then
log.info("dns resolve ", domain, ", result: ", json.delay_encode(answer))
return table.deepcopy(answer)
end
return nil, "unsupported DNS answer"
end
function _M.new(opts)
local local_conf = config_local.local_conf()
if opts.enable_ipv6 == nil then
opts.enable_ipv6 = local_conf.apisix.enable_ipv6
end
-- when ipv6 is disabled, drop AAAA from the query order so AAAA-only hosts
-- fail to resolve instead of returning unusable addresses
if not opts.enable_ipv6 then
for i, v in ipairs(opts.order) do
if v == "AAAA" then
table_remove(opts.order, i)
break
end
end
end
opts.timeout = 2000 -- 2 sec
opts.retrans = 5 -- 5 retransmissions on receive timeout
-- make sure each client gets its own fresh copy of the module
package_loaded["resty.dns.client"] = nil
local dns_client_mod = require("resty.dns.client")
local ok, err = dns_client_mod.init(opts)
if not ok then
return nil, "failed to init the dns client: " .. err
end
return setmetatable({client = dns_client_mod}, {__index = _M})
end
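-- A hedged usage sketch (options assumed; they are passed through to
-- lua-resty-dns-client's init()):
--
--   local dns_client = require("apisix.core.dns.client")
--   local client, err = dns_client.new({
--       order = {"last", "SRV", "A", "AAAA", "CNAME"},
--   })
--   local answer = client and client:resolve("apisix.apache.org",
--                                            dns_client.RETURN_ALL)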
return _M

View File

@@ -0,0 +1,109 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ffi = require "ffi"
local json = require("apisix.core.json")
local log = require("apisix.core.log")
local string = require("apisix.core.string")
local os = os
local type = type
local upper = string.upper
local find = string.find
local sub = string.sub
local str = ffi.string
local ENV_PREFIX = "$ENV://"
local _M = {
PREFIX = ENV_PREFIX
}
local apisix_env_vars = {}
ffi.cdef [[
extern char **environ;
]]
function _M.init()
local e = ffi.C.environ
if not e then
log.warn("could not access environment variables")
return
end
local i = 0
while e[i] ~= nil do
local var = str(e[i])
local p = find(var, "=")
if p then
apisix_env_vars[sub(var, 1, p - 1)] = sub(var, p + 1)
end
i = i + 1
end
end
local function parse_env_uri(env_uri)
-- Avoid a crash inside has_prefix when env_uri is not a string.
if type(env_uri) ~= "string" then
return nil, "error env_uri type: " .. type(env_uri)
end
if not string.has_prefix(upper(env_uri), ENV_PREFIX) then
return nil, "error env_uri prefix: " .. env_uri
end
local path = sub(env_uri, #ENV_PREFIX + 1)
local idx = find(path, "/")
if not idx then
return {key = path, sub_key = ""}
end
local key = sub(path, 1, idx - 1)
local sub_key = sub(path, idx + 1)
return {
key = key,
sub_key = sub_key
}
end
function _M.fetch_by_uri(env_uri)
log.info("fetching data from env uri: ", env_uri)
local opts, err = parse_env_uri(env_uri)
if not opts then
return nil, err
end
local main_value = apisix_env_vars[opts.key] or os.getenv(opts.key)
if main_value and opts.sub_key ~= "" then
local vt, err = json.decode(main_value)
if not vt then
return nil, "decode failed, err: " .. (err or "") .. ", value: " .. main_value
end
return vt[opts.sub_key]
end
return main_value
end
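-- Illustrative lookups (environment content assumed): with
-- JAVA_OPTS='{"mem":"1g"}' exported in the environment,
--
--   _M.fetch_by_uri("$ENV://JAVA_OPTS")      --> '{"mem":"1g"}'
--   _M.fetch_by_uri("$ENV://JAVA_OPTS/mem")  --> "1g"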
return _M

View File

@@ -0,0 +1,676 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Etcd API.
--
-- @module core.etcd
local require = require
local fetch_local_conf = require("apisix.core.config_local").local_conf
local array_mt = require("apisix.core.json").array_mt
local log = require("apisix.core.log")
local try_read_attr = require("apisix.core.table").try_read_attr
local v3_adapter = require("apisix.admin.v3_adapter")
local etcd = require("resty.etcd")
local clone_tab = require("table.clone")
local health_check = require("resty.etcd.health_check")
local pl_path = require("pl.path")
local ipairs = ipairs
local setmetatable = setmetatable
local string = string
local tonumber = tonumber
local ngx_get_phase = ngx.get_phase
local _M = {}
local NOT_ALLOW_WRITE_ETCD_WARN = 'Data plane role should not write to etcd. ' ..
'This operation will be deprecated in future releases.'
local function is_data_plane()
local local_conf, err = fetch_local_conf()
if not local_conf then
return nil, err
end
local role = try_read_attr(local_conf, "deployment", "role")
if role == "data_plane" then
return true
end
return false
end
local function disable_write_if_data_plane()
local data_plane, err = is_data_plane()
if err then
log.error("failed to check data plane role: ", err)
return true, err
end
if data_plane then
-- currently this only warns; a future release will actually block
-- etcd writes from the data plane
log.warn(NOT_ALLOW_WRITE_ETCD_WARN)
return false
end
return false, nil
end
local function wrap_etcd_client(etcd_cli)
-- note: the txn method can both read and write; don't use txn for writes in the data plane role
local methods_to_wrap = {
"set",
"setnx",
"setx",
"delete",
"rmdir",
"grant",
"revoke",
"keepalive"
}
local original_methods = {}
for _, method in ipairs(methods_to_wrap) do
if not etcd_cli[method] then
log.error("method ", method, " not found in etcd client")
return nil, "method " .. method .. " not found in etcd client"
end
original_methods[method] = etcd_cli[method]
end
for _, method in ipairs(methods_to_wrap) do
etcd_cli[method] = function(self, ...)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
return original_methods[method](self, ...)
end
end
return etcd_cli
end
local function _new(etcd_conf)
local prefix = etcd_conf.prefix
etcd_conf.http_host = etcd_conf.host
etcd_conf.host = nil
etcd_conf.prefix = nil
etcd_conf.protocol = "v3"
etcd_conf.api_prefix = "/v3"
-- default to verify etcd cluster certificate
etcd_conf.ssl_verify = true
if etcd_conf.tls then
if etcd_conf.tls.verify == false then
etcd_conf.ssl_verify = false
end
if etcd_conf.tls.cert then
etcd_conf.ssl_cert_path = etcd_conf.tls.cert
etcd_conf.ssl_key_path = etcd_conf.tls.key
end
if etcd_conf.tls.sni then
etcd_conf.sni = etcd_conf.tls.sni
end
end
local etcd_cli, err = etcd.new(etcd_conf)
if not etcd_cli then
return nil, nil, err
end
etcd_cli = wrap_etcd_client(etcd_cli)
return etcd_cli, prefix
end
---
-- Create an etcd client that connects to etcd directly, without being proxied
-- by the conf server. This method is used in the init_worker phase, when the
-- conf server is not ready.
--
-- @function core.etcd.new_without_proxy
-- @treturn table|nil the etcd client, or nil if failed.
-- @treturn string|nil the configured prefix of etcd keys, or nil if failed.
-- @treturn nil|string the error message.
local function new_without_proxy()
local local_conf, err = fetch_local_conf()
if not local_conf then
return nil, nil, err
end
local etcd_conf = clone_tab(local_conf.etcd)
if local_conf.apisix.ssl and local_conf.apisix.ssl.ssl_trusted_certificate then
etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate
end
return _new(etcd_conf)
end
_M.new_without_proxy = new_without_proxy
local function new()
local local_conf, err = fetch_local_conf()
if not local_conf then
return nil, nil, err
end
local etcd_conf = clone_tab(local_conf.etcd)
if local_conf.apisix.ssl and local_conf.apisix.ssl.ssl_trusted_certificate then
etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate
end
if not health_check.conf then
health_check.init({
max_fails = 1,
retry = true,
})
end
return _new(etcd_conf)
end
_M.new = new
local function switch_proxy()
if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then
return new_without_proxy()
end
local etcd_cli, prefix, err = new()
if not etcd_cli or err then
return etcd_cli, prefix, err
end
if not etcd_cli.unix_socket_proxy then
return etcd_cli, prefix, err
end
local sock_path = etcd_cli.unix_socket_proxy:sub(#"unix:" + 1)
local ok = pl_path.exists(sock_path)
if not ok then
return new_without_proxy()
end
return etcd_cli, prefix, err
end
_M.get_etcd_syncer = switch_proxy
-- convert ETCD v3 entry to v2 one
local function kvs_to_node(kvs)
local node = {}
node.key = kvs.key
node.value = kvs.value
node.createdIndex = tonumber(kvs.create_revision)
node.modifiedIndex = tonumber(kvs.mod_revision)
return node
end
_M.kvs_to_node = kvs_to_node
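-- Illustrative mapping (values assumed): a v3 entry
--   {key = "/apisix/routes/1", value = {...},
--    create_revision = "13", mod_revision = "42"}
-- becomes the v2-style node
--   {key = "/apisix/routes/1", value = {...},
--    createdIndex = 13, modifiedIndex = 42}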
local function kvs_to_nodes(res, exclude_dir)
res.body.node.dir = true
res.body.node.nodes = setmetatable({}, array_mt)
if exclude_dir then
for i=2, #res.body.kvs do
res.body.node.nodes[i-1] = kvs_to_node(res.body.kvs[i])
end
else
for i=1, #res.body.kvs do
res.body.node.nodes[i] = kvs_to_node(res.body.kvs[i])
end
end
return res
end
local function not_found(res)
res.body.message = "Key not found"
res.reason = "Not found"
res.status = 404
return res
end
-- When `is_dir` is true, returns the values of both the dir key and its descendants.
-- Otherwise, returns the value of the key only.
function _M.get_format(res, real_key, is_dir, formatter)
if res.body.error == "etcdserver: user name is empty" then
return nil, "insufficient credentials code: 401"
end
if res.body.error == "etcdserver: permission denied" then
return nil, "etcd forbidden code: 403"
end
if res.body.error then
-- other errors, like "grpc: received message larger than max"
return nil, res.body.error
end
res.headers["X-Etcd-Index"] = res.body.header.revision
if not res.body.kvs then
return not_found(res)
end
v3_adapter.to_v3(res.body, "get")
if formatter then
return formatter(res)
end
if not is_dir then
local key = res.body.kvs[1].key
if key ~= real_key then
return not_found(res)
end
res.body.node = kvs_to_node(res.body.kvs[1])
else
-- In etcd v2, the key directly asked for is `node`, and the keys under
-- this dir are `nodes`. In v3 this structure is flattened and all keys
-- related to the requested key are returned in `kvs`
res.body.node = kvs_to_node(res.body.kvs[1])
-- we have an init_dir (for etcd v2) value that can't be deserialized with
-- json, but we don't put init_dir for new resource types like consumer
-- credentials
if not res.body.kvs[1].value then
-- remove last "/" when necessary
if string.byte(res.body.node.key, -1) == 47 then
res.body.node.key = string.sub(res.body.node.key, 1, #res.body.node.key-1)
end
res = kvs_to_nodes(res, true)
else
-- get dir key by remove last part of node key,
-- for example: /apisix/consumers/jack -> /apisix/consumers
local last_slash_index = string.find(res.body.node.key, "/[^/]*$")
if last_slash_index then
res.body.node.key = string.sub(res.body.node.key, 1, last_slash_index-1)
end
res = kvs_to_nodes(res, false)
end
end
res.body.kvs = nil
v3_adapter.to_v3_list(res.body)
return res
end
function _M.watch_format(v3res)
local v2res = {}
v2res.headers = {
["X-Etcd-Index"] = v3res.result.header.revision
}
v2res.body = {
node = {}
}
local compact_revision = v3res.result.compact_revision
if compact_revision and tonumber(compact_revision) > 0 then
-- When the revisions are compacted, there might be compacted changes
-- which are unsynced. So we need to do a full sync.
-- TODO: cover this branch in CI
return nil, "compacted"
end
for i, event in ipairs(v3res.result.events) do
v2res.body.node[i] = kvs_to_node(event.kv)
if event.type == "DELETE" then
v2res.body.action = "delete"
end
end
return v2res
end
local get_etcd_cli
do
local prefix
local etcd_cli_init_phase
local etcd_cli
local tmp_etcd_cli
function get_etcd_cli()
local err
if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then
if etcd_cli_init_phase == nil then
tmp_etcd_cli, prefix, err = new_without_proxy()
if not tmp_etcd_cli then
return nil, nil, err
end
return tmp_etcd_cli, prefix
end
return etcd_cli_init_phase, prefix
end
if etcd_cli_init_phase ~= nil then
-- we can't share the etcd instance created in init* phase
-- they have different configuration
etcd_cli_init_phase:close()
etcd_cli_init_phase = nil
end
if etcd_cli == nil then
tmp_etcd_cli, prefix, err = switch_proxy()
if not tmp_etcd_cli then
return nil, nil, err
end
etcd_cli = tmp_etcd_cli
return tmp_etcd_cli, prefix
end
return etcd_cli, prefix
end
end
-- export it so we can mock the etcd cli in test
_M.get_etcd_cli = get_etcd_cli
function _M.get(key, is_dir)
local etcd_cli, prefix, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
key = prefix .. key
-- in etcd v2, get could implicitly turn into readdir
-- while in v3, we need to do it explicitly
local res, err = etcd_cli:readdir(key)
if not res then
return nil, err
end
return _M.get_format(res, key, is_dir)
end
local function set(key, value, ttl)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, prefix, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
-- in v3, a lease substitutes for the v2 ttl
local res, err
if ttl then
local data, grant_err = etcd_cli:grant(tonumber(ttl))
if not data then
return nil, grant_err
end
res, err = etcd_cli:set(prefix .. key, value, {prev_kv = true, lease = data.body.ID})
if not res then
return nil, err
end
res.body.lease_id = data.body.ID
else
res, err = etcd_cli:set(prefix .. key, value, {prev_kv = true})
end
if not res then
return nil, err
end
if res.body.error then
return nil, res.body.error
end
res.headers["X-Etcd-Index"] = res.body.header.revision
-- etcd v3 set does not return kv info
v3_adapter.to_v3(res.body, "set")
res.body.node = {}
res.body.node.key = prefix .. key
res.body.node.value = value
res.status = 201
if res.body.prev_kv then
res.status = 200
res.body.prev_kv = nil
end
return res, nil
end
_M.set = set
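-- A hedged usage sketch (key and value assumed):
--
--   local core_etcd = require("apisix.core.etcd")
--   -- write with a 60 second TTL, implemented via a v3 lease as above
--   local res, err = core_etcd.set("/plugins/example", {enabled = true}, 60)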
function _M.atomic_set(key, value, ttl, mod_revision)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, prefix, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
local lease_id
if ttl then
local data, grant_err = etcd_cli:grant(tonumber(ttl))
if not data then
return nil, grant_err
end
lease_id = data.body.ID
end
key = prefix .. key
local compare = {
{
key = key,
target = "MOD",
result = "EQUAL",
mod_revision = mod_revision,
}
}
local success = {
{
requestPut = {
key = key,
value = value,
lease = lease_id,
}
}
}
local res, err = etcd_cli:txn(compare, success)
if not res then
return nil, err
end
if not res.body.succeeded then
return nil, "value changed before overwritten"
end
res.headers["X-Etcd-Index"] = res.body.header.revision
-- etcd v3 set does not return kv info
v3_adapter.to_v3(res.body, "compareAndSwap")
res.body.node = {
key = key,
value = value,
}
return res, nil
end
function _M.push(key, value, ttl)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, _, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
-- Create a new revision and use it as the id.
-- It would be better to use a snowflake algorithm like manager-api does,
-- but we haven't found a good library, and writing our own costs too much
-- as the admin-api will eventually be replaced by manager-api.
local res, err = set("/gen_id", 1)
if not res then
return nil, err
end
-- manually add suffix
local index = res.body.header.revision
index = string.format("%020d", index)
-- set the basic id attribute
value.id = index
res, err = set(key .. "/" .. index, value, ttl)
if not res then
return nil, err
end
v3_adapter.to_v3(res.body, "create")
return res, nil
end
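-- Illustrative result (revision assumed): if the write to /gen_id lands at
-- revision 42, push("/proto", conf) stores the item under
-- "/proto/00000000000000000042" and sets conf.id to that zero-padded index.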
function _M.delete(key)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, prefix, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
local res, err = etcd_cli:delete(prefix .. key)
if not res then
return nil, err
end
res.headers["X-Etcd-Index"] = res.body.header.revision
if not res.body.deleted then
return not_found(res), nil
end
-- etcd v3 delete does not return kv info
v3_adapter.to_v3(res.body, "delete")
res.body.node = {}
res.body.key = prefix .. key
return res, nil
end
function _M.rmdir(key, opts)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, prefix, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
local res, err = etcd_cli:rmdir(prefix .. key, opts)
if not res then
return nil, err
end
res.headers["X-Etcd-Index"] = res.body.header.revision
if not res.body.deleted then
return not_found(res), nil
end
v3_adapter.to_v3(res.body, "delete")
res.body.node = {}
res.body.key = prefix .. key
return res, nil
end
---
-- Get etcd cluster and server version.
--
-- @function core.etcd.server_version
-- @treturn table The response of query etcd server version.
-- @usage
-- local res, err = core.etcd.server_version()
-- -- the res.body is as follows:
-- -- {
-- -- etcdcluster = "3.5.0",
-- -- etcdserver = "3.5.0"
-- -- }
function _M.server_version()
local etcd_cli, _, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
return etcd_cli:version()
end
function _M.keepalive(id)
local disable, err = disable_write_if_data_plane()
if disable then
return nil, err
end
local etcd_cli, _, err = get_etcd_cli()
if not etcd_cli then
return nil, err
end
local res, err = etcd_cli:keepalive(id)
if not res then
return nil, err
end
return res, nil
end
return _M

View File

@@ -0,0 +1,45 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local CONST = {
BUILD_ROUTER = 1,
}
local _M = {
CONST = CONST,
}
local events = {}
function _M.push(type, ...)
local handler = events[type]
if handler then
handler(...)
end
end
function _M.register(type, handler)
-- TODO: we can register more than one handler
events[type] = handler
end
function _M.unregister(type)
events[type] = nil
end
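-- A hedged usage sketch (module path and handler body assumed):
--
--   local router_events = require("apisix.events")
--   router_events.register(router_events.CONST.BUILD_ROUTER, function(routes)
--       -- rebuild the router from the given routes here
--   end)
--   router_events.push(router_events.CONST.BUILD_ROUTER, routes)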
return _M

View File

@@ -0,0 +1,169 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Instance id of APISIX
--
-- @module core.id
local fetch_local_conf = require("apisix.core.config_local").local_conf
local try_read_attr = require("apisix.core.table").try_read_attr
local profile = require("apisix.core.profile")
local log = require("apisix.core.log")
local uuid = require("resty.jit-uuid")
local lyaml = require("lyaml")
local smatch = string.match
local open = io.open
local type = type
local ipairs = ipairs
local string = string
local math = math
local prefix = ngx.config.prefix()
local pairs = pairs
local ngx_exit = ngx.exit
local apisix_uid
local _M = {version = 0.1}
local function rtrim(str)
return smatch(str, "^(.-)%s*$")
end
local function read_file(path)
local file = open(path, "rb") -- r read mode and b binary mode
if not file then
return nil
end
local content = file:read("*a") -- *a or *all reads the whole file
file:close()
return rtrim(content)
end
local function write_file(path, data)
local file = open(path, "w+")
if not file then
return nil, "failed to open file[" .. path .. "] for writing"
end
file:write(data)
file:close()
return true
end
local function generate_yaml(table)
-- By default lyaml dumps null values as []
-- The following logic is a workaround so that null values are emitted as null
local function replace_null(tbl)
for k, v in pairs(tbl) do
if type(v) == "table" then
replace_null(v)
elseif v == nil then
tbl[k] = "<PLACEHOLDER>"
end
end
end
-- Replace null values with "<PLACEHOLDER>"
replace_null(table)
local yaml = lyaml.dump({ table })
yaml = yaml:gsub("<PLACEHOLDER>", "null"):gsub("%[%s*%]", "null")
return yaml
end
_M.gen_uuid_v4 = uuid.generate_v4
--- This will autogenerate the admin key if it's passed as an empty string in the configuration.
local function autogenerate_admin_key(default_conf)
local changed = false
-- Check if deployment.role is either traditional or control_plane
local deployment_role = default_conf.deployment and default_conf.deployment.role
if deployment_role and (deployment_role == "traditional" or
deployment_role == "control_plane") then
-- Check if deployment.admin.admin_key is not nil and it's an empty string
local admin_keys = try_read_attr(default_conf, "deployment", "admin", "admin_key")
if admin_keys and type(admin_keys) == "table" then
for i, admin_key in ipairs(admin_keys) do
if admin_key.role == "admin" and admin_key.key == "" then
changed = true
admin_keys[i].key = ""
for _ = 1, 32 do
admin_keys[i].key = admin_keys[i].key ..
string.char(math.random(65, 90) + math.random(0, 1) * 32)
end
end
end
end
end
return default_conf, changed
end
function _M.init()
local local_conf = fetch_local_conf()
local local_conf, changed = autogenerate_admin_key(local_conf)
if changed then
local yaml_conf = generate_yaml(local_conf)
local local_conf_path = profile:yaml_path("config")
local ok, err = write_file(local_conf_path, yaml_conf)
if not ok then
log.error("failed to write updated local configuration: ", err)
ngx_exit(-1)
end
end
-- allow the user to specify a meaningful id as the apisix instance id
local uid_file_path = prefix .. "/conf/apisix.uid"
apisix_uid = read_file(uid_file_path)
if apisix_uid then
return
end
local id = try_read_attr(local_conf, "apisix", "id")
if id then
apisix_uid = local_conf.apisix.id
else
uuid.seed()
apisix_uid = uuid.generate_v4()
log.notice("not found apisix uid, generate a new one: ", apisix_uid)
end
local ok, err = write_file(uid_file_path, apisix_uid)
if not ok then
log.error(err)
end
end
---
-- Returns the instance id of the running APISIX
--
-- @function core.id.get
-- @treturn string the instance id
-- @usage
-- local apisix_id = core.id.get()
function _M.get()
return apisix_uid
end
return _M

View File

@@ -0,0 +1,50 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- I/O operations on files.
--
-- @module core.io
local open = io.open
local _M = {}
---
-- Read the contents of a file.
--
-- @function core.io.get_file
-- @tparam string file_name either an absolute path or
-- a relative path based on the APISIX working directory.
-- @treturn string The file content.
-- @usage
-- local file_content, err = core.io.get_file("conf/apisix.uid")
-- -- the `file_content` maybe the APISIX instance id in uuid format,
-- -- like "3f0e827b-5f26-440e-8074-c101c8eb0174"
function _M.get_file(file_name)
local f, err = open(file_name, 'r')
if not f then
return nil, err
end
local req_body = f:read("*all")
f:close()
return req_body
end
return _M

View File

@@ -0,0 +1,80 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- IP match and verify module.
--
-- @module core.ip
local json = require("apisix.core.json")
local log = require("apisix.core.log")
local ipmatcher = require("resty.ipmatcher")
local str_sub = string.sub
local str_find = require("apisix.core.string").find
local tonumber = tonumber
local _M = {}
function _M.create_ip_matcher(ip_list)
local ip, err = ipmatcher.new(ip_list)
if not ip then
log.error("failed to create ip matcher: ", err,
" ip list: ", json.delay_encode(ip_list))
return nil
end
return ip
end
---
-- Verify that the given ip is a valid ip or cidr.
--
-- @function core.ip.validate_cidr_or_ip
-- @tparam string ip IP or cidr.
-- @treturn boolean True if the given ip is a valid ip or cidr, false otherwise.
-- @usage
-- local ip1 = core.ip.validate_cidr_or_ip("127.0.0.1") -- true
-- local cidr = core.ip.validate_cidr_or_ip("113.74.26.106/24") -- true
-- local ip2 = core.ip.validate_cidr_or_ip("113.74.26.666") -- false
function _M.validate_cidr_or_ip(ip)
local mask = 0
local sep_pos = str_find(ip, "/")
if sep_pos then
mask = str_sub(ip, sep_pos + 1)
mask = tonumber(mask)
-- reject non-numeric or out-of-range masks
if not mask or mask < 0 or mask > 128 then
return false
end
ip = str_sub(ip, 1, sep_pos - 1)
end
if ipmatcher.parse_ipv4(ip) then
if mask < 0 or mask > 32 then
return false
end
return true
end
if mask < 0 or mask > 128 then
return false
end
return ipmatcher.parse_ipv6(ip)
end
return _M

View File

@@ -0,0 +1,132 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped serialization and deserialization modules for json and lua tables.
--
-- @module core.json
local cjson = require("cjson.safe")
local json_encode = cjson.encode
local clear_tab = require("table.clear")
local ngx = ngx
local tostring = tostring
local type = type
local pairs = pairs
local cached_tab = {}
cjson.encode_escape_forward_slash(false)
cjson.decode_array_with_array_mt(true)
local _M = {
version = 0.1,
array_mt = cjson.array_mt,
decode = cjson.decode,
-- This method produces the same encoded string whenever the input is
-- unchanged. Repeated calls to cjson.encode may produce different strings
-- because cjson doesn't maintain the object key order.
stably_encode = require("dkjson").encode
}
local function serialise_obj(data)
if type(data) == "function" or type(data) == "userdata"
or type(data) == "cdata"
or type(data) == "table" then
return tostring(data)
end
return data
end
local function tab_clone_with_serialise(data)
if type(data) ~= "table" then
return serialise_obj(data)
end
local t = {}
for k, v in pairs(data) do
if type(v) == "table" then
if cached_tab[v] then
t[serialise_obj(k)] = tostring(v)
else
cached_tab[v] = true
t[serialise_obj(k)] = tab_clone_with_serialise(v)
end
else
t[serialise_obj(k)] = serialise_obj(v)
end
end
return t
end
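-- encode `data` as JSON; when `force` is true, values that cjson can't
-- serialise (functions, userdata, cdata, nested cycles) are first converted
-- to strings via the clone-and-serialise helper above (comment added for
-- clarity).
-- illustrative usage:
-- local s = encode({a = 1})             -- '{"a":1}'
-- local s2 = encode({f = print}, true)  -- the function is encoded as a string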
local function encode(data, force)
if force then
clear_tab(cached_tab)
data = tab_clone_with_serialise(data)
end
return json_encode(data)
end
_M.encode = encode
local max_delay_encode_items = 16
local delay_tab_idx = 0
local delay_tab_arr = {}
for i = 1, max_delay_encode_items do
delay_tab_arr[i] = setmetatable({data = "", force = false}, {
__tostring = function(self)
local res, err = encode(self.data, self.force)
if not res then
ngx.log(ngx.WARN, "failed to encode: ", err,
" force: ", self.force)
end
return res
end
})
end
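-- the 16 tables above form a reusable ring: each delay_encode call hands out
-- the next slot, so a returned table is only valid until 16 further calls.
-- This assumes the result is consumed immediately, i.e. within the same log
-- statement (comment added for clarity).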
---
-- Delay encoding of the input data to avoid unnecessary encode operations.
-- When the log is actually written, OpenResty converts a table parameter to
-- a string by checking whether a `__tostring` metamethod is registered, and
-- if so, calling it to do the conversion.
--
-- @function core.json.delay_encode
-- @tparam string|table data The data to be encoded.
-- @tparam boolean force Force encoding of values that can't be encoded as JSON by converting them with tostring.
-- @treturn table The table with the __tostring function overridden.
-- @usage
-- core.log.info("conf : ", core.json.delay_encode(conf))
function _M.delay_encode(data, force)
delay_tab_idx = delay_tab_idx+1
if delay_tab_idx > max_delay_encode_items then
delay_tab_idx = 1
end
delay_tab_arr[delay_tab_idx].data = data
delay_tab_arr[delay_tab_idx].force = force
return delay_tab_arr[delay_tab_idx]
end
return _M

View File

@@ -0,0 +1,173 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped `ngx.log`.
--
-- @module core.log
local ngx = ngx
local ngx_log = ngx.log
local require = require
local select = select
local setmetatable = setmetatable
local tostring = tostring
local unpack = unpack
-- avoid loading other module since core.log is the most foundational one
local tab_clear = require("table.clear")
local ngx_errlog = require("ngx.errlog")
local ngx_get_phase = ngx.get_phase
local _M = {version = 0.4}
local log_levels = {
stderr = ngx.STDERR,
emerg = ngx.EMERG,
alert = ngx.ALERT,
crit = ngx.CRIT,
error = ngx.ERR,
warn = ngx.WARN,
notice = ngx.NOTICE,
info = ngx.INFO,
debug = ngx.DEBUG,
}
local cur_level
local do_nothing = function() end
local function update_log_level()
-- Nginx uses the `notice` level in the init phase instead of the level from
-- the error_log directive; see ngx_log_init in src/core/ngx_log.c
if ngx_get_phase() ~= "init" then
cur_level = ngx.config.subsystem == "http" and ngx_errlog.get_sys_filter_level()
end
end
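---
-- Create a child logger that prefixes every message (doc added for clarity).
--
-- @function core.log.new
-- @tparam string prefix The prefix prepended to each log message.
-- @usage
-- -- illustrative sketch:
-- local my_log = log.new("[my-plugin] ")
-- my_log.warn("something happened")  -- logs "[my-plugin] something happened"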
function _M.new(prefix)
local m = {version = _M.version}
setmetatable(m, {__index = function(self, cmd)
local log_level = log_levels[cmd]
local method
update_log_level()
if cur_level and (log_level > cur_level)
then
method = do_nothing
else
method = function(...)
return ngx_log(log_level, prefix, ...)
end
end
-- cache the lazily generated method in our
-- module table
if ngx_get_phase() ~= "init" then
self[cmd] = method
end
return method
end})
return m
end
setmetatable(_M, {__index = function(self, cmd)
local log_level = log_levels[cmd]
local method
update_log_level()
if cur_level and (log_level > cur_level)
then
method = do_nothing
else
method = function(...)
return ngx_log(log_level, ...)
end
end
-- cache the lazily generated method in our
-- module table
if ngx_get_phase() ~= "init" then
self[cmd] = method
end
return method
end})
local delay_tab = setmetatable({
func = function() end,
args = {},
res = nil,
}, {
__tostring = function(self)
-- the `__tostring` metamethod will be called twice: first to get the
-- length and then to get the data
if self.res then
local res = self.res
-- avoid unexpected reference
self.res = nil
return res
end
local res, err = self.func(unpack(self.args))
if err then
ngx.log(ngx.WARN, "failed to exec: ", err)
end
-- avoid unexpected reference
tab_clear(self.args)
self.res = tostring(res)
return self.res
end
})
---
-- Delay the execution of a function until the log is actually printed.
-- It works well with log.$level, e.g. log.info(..., log.delay_exec(func, ...)).
-- It should not be used elsewhere.
--
-- @function core.log.delay_exec
-- @tparam function func The function whose execution is delayed until log printing.
-- @treturn table The table with the res attribute overridden.
-- @usage
-- local function delay_func(param1, param2)
-- return param1 .. " " .. param2
-- end
-- core.log.info("delay log print: ", core.log.delay_exec(delay_func, "hello", "world))
-- -- then the log will be: "delay log print: hello world"
function _M.delay_exec(func, ...)
delay_tab.func = func
tab_clear(delay_tab.args)
for i = 1, select('#', ...) do
delay_tab.args[i] = select(i, ...)
end
delay_tab.res = nil
return delay_tab
end
return _M

View File

@@ -0,0 +1,193 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- LRU Caching Implementation.
--
-- @module core.lrucache
local lru_new = require("resty.lrucache").new
local resty_lock = require("resty.lock")
local log = require("apisix.core.log")
local tostring = tostring
local ngx = ngx
local get_phase = ngx.get_phase
local lock_shdict_name = "lrucache-lock"
if ngx.config.subsystem == "stream" then
lock_shdict_name = lock_shdict_name .. "-" .. ngx.config.subsystem
end
local can_yield_phases = {
ssl_session_fetch = true,
ssl_session_store = true,
rewrite = true,
access = true,
content = true,
timer = true
}
local GLOBAL_ITEMS_COUNT = 1024
local GLOBAL_TTL = 60 * 60 -- 60 min
local PLUGIN_TTL = 5 * 60 -- 5 min
local PLUGIN_ITEMS_COUNT = 8
local global_lru_fun
local function fetch_valid_cache(lru_obj, invalid_stale, item_ttl,
item_release, key, version)
local obj, stale_obj = lru_obj:get(key)
if obj and obj.ver == version then
return obj
end
if not invalid_stale and stale_obj and stale_obj.ver == version then
lru_obj:set(key, stale_obj, item_ttl)
return stale_obj
end
if item_release and obj then
item_release(obj.val)
end
return nil
end
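---
-- Create an LRU cache accessor function (doc added for clarity; defaults
-- below are 1h TTL / 1024 items globally, and 5min TTL / 8 items when
-- opts.type == "plugin").
--
-- @usage
-- -- illustrative sketch:
-- local lru_get = core.lrucache.new({type = "plugin", ttl = 300, count = 16})
-- local obj, err = lru_get(key, version, create_obj_fun, ...)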
local function new_lru_fun(opts)
local item_count, item_ttl
if opts and opts.type == 'plugin' then
item_count = opts.count or PLUGIN_ITEMS_COUNT
item_ttl = opts.ttl or PLUGIN_TTL
else
item_count = opts and opts.count or GLOBAL_ITEMS_COUNT
item_ttl = opts and opts.ttl or GLOBAL_TTL
end
local item_release = opts and opts.release
local invalid_stale = opts and opts.invalid_stale
local serial_creating = opts and opts.serial_creating
local lru_obj = lru_new(item_count)
return function (key, version, create_obj_fun, ...)
if not serial_creating or not can_yield_phases[get_phase()] then
local cache_obj = fetch_valid_cache(lru_obj, invalid_stale,
item_ttl, item_release, key, version)
if cache_obj then
return cache_obj.val
end
local obj, err = create_obj_fun(...)
if obj ~= nil then
lru_obj:set(key, {val = obj, ver = version}, item_ttl)
end
return obj, err
end
local cache_obj = fetch_valid_cache(lru_obj, invalid_stale, item_ttl,
item_release, key, version)
if cache_obj then
return cache_obj.val
end
local lock, err = resty_lock:new(lock_shdict_name)
if not lock then
return nil, "failed to create lock: " .. err
end
local key_s = tostring(key)
log.info("try to lock with key ", key_s)
local elapsed, err = lock:lock(key_s)
if not elapsed then
return nil, "failed to acquire the lock: " .. err
end
cache_obj = fetch_valid_cache(lru_obj, invalid_stale, item_ttl,
nil, key, version)
if cache_obj then
lock:unlock()
log.info("unlock with key ", key_s)
return cache_obj.val
end
local obj, err = create_obj_fun(...)
if obj ~= nil then
lru_obj:set(key, {val = obj, ver = version}, item_ttl)
end
lock:unlock()
log.info("unlock with key ", key_s)
return obj, err
end
end
global_lru_fun = new_lru_fun()
local function plugin_ctx_key_and_ver(api_ctx, extra_key)
local key = api_ctx.conf_type .. "#" .. api_ctx.conf_id
if extra_key then
key = key .. "#" .. extra_key
end
return key, api_ctx.conf_version
end
---
-- Cache some objects for plugins to avoid duplicate resources creation.
--
-- @function core.lrucache.plugin_ctx
-- @tparam table lrucache LRUCache object instance.
-- @tparam table api_ctx The request context.
-- @tparam string extra_key Additional parameters for generating the lrucache identification key.
-- @tparam function create_obj_func Function for creating the cache object.
-- If the object does not exist in the lrucache, this function is
-- called to create it and cache it in the lrucache.
-- @treturn table The object cached in lrucache.
-- @usage
-- local function create_obj()
--     -- create the object
--     -- return the object
-- end
-- local obj, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_obj)
-- -- obj is the object cached in lrucache
local function plugin_ctx(lrucache, api_ctx, extra_key, create_obj_func, ...)
local key, ver = plugin_ctx_key_and_ver(api_ctx, extra_key)
return lrucache(key, ver, create_obj_func, ...)
end
local function plugin_ctx_id(api_ctx, extra_key)
local key, ver = plugin_ctx_key_and_ver(api_ctx, extra_key)
return key .. "#" .. ver
end
local _M = {
version = 0.1,
new = new_lru_fun,
global = global_lru_fun,
plugin_ctx = plugin_ctx,
plugin_ctx_id = plugin_ctx_id,
}
return _M

View File

@@ -0,0 +1,41 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Common math utilities.
--
-- @module core.math
local _M = {}
---
-- Calculate the greatest common divisor (GCD) of two numbers
--
-- @function core.math.gcd
-- @tparam number a
-- @tparam number b
-- @treturn number the GCD of a and b
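-- @usage
-- -- illustrative sketch:
-- local g = core.math.gcd(12, 18) -- 6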
local function gcd(a, b)
if b == 0 then
return a
end
return gcd(b, a % b)
end
_M.gcd = gcd
return _M

View File

@@ -0,0 +1,118 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- OS module.
--
-- @module core.os
local ffi = require("ffi")
local ffi_str = ffi.string
local ffi_errno = ffi.errno
local C = ffi.C
local ceil = math.ceil
local floor = math.floor
local error = error
local tostring = tostring
local type = type
local _M = {}
local WNOHANG = 1
ffi.cdef[[
typedef int32_t pid_t;
typedef unsigned int useconds_t;
int setenv(const char *name, const char *value, int overwrite);
char *strerror(int errnum);
int usleep(useconds_t usec);
pid_t waitpid(pid_t pid, int *wstatus, int options);
]]
local function err()
return ffi_str(C.strerror(ffi_errno()))
end
---
-- Sets the value of the environment variable.
--
-- @function core.os.setenv
-- @tparam string name The name of environment variable.
-- @tparam string value The value of environment variable.
-- @treturn boolean Result of setting the environment variable; true on success.
-- @usage
-- local ok, err = core.os.setenv("foo", "bar")
function _M.setenv(name, value)
local tv = type(value)
if type(name) ~= "string" or (tv ~= "string" and tv ~= "number") then
return false, "invalid argument"
end
value = tostring(value)
local ok = C.setenv(name, value, 1) == 0
if not ok then
return false, err()
end
return true
end
---
-- Sleep (blocking) for the given number of microseconds.
--
-- @function core.os.usleep
-- @tparam number us The number of microseconds.
local function usleep(us)
if ceil(us) ~= floor(us) then
error("bad microseconds: " .. us)
end
C.usleep(us)
end
_M.usleep = usleep
local function waitpid_nohang(pid)
local res = C.waitpid(pid, nil, WNOHANG)
if res == -1 then
return nil, err()
end
return res > 0
end
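-- Wait for process `pid` to exit by polling waitpid(WNOHANG) every 10ms for
-- up to `timeout` seconds; returns true once the process has exited, nil plus
-- an error message on failure, and nothing on timeout (comment added for
-- clarity, based on the loop below).
-- illustrative usage:
-- local ok, err = core.os.waitpid(pid, 1)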
function _M.waitpid(pid, timeout)
local count = 0
local step = 1000 * 10
local total = timeout * 1000 * 1000
while step * count < total do
count = count + 1
usleep(step)
local ok, err = waitpid_nohang(pid)
if err then
return nil, err
end
if ok then
return true
end
end
end
return _M

View File

@@ -0,0 +1,67 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Profile module.
--
-- @module core.profile
local util = require("apisix.cli.util")
local _M = {
version = 0.1,
profile = os.getenv("APISIX_PROFILE") or "",
apisix_home = (ngx and ngx.config.prefix()) or ""
}
---
-- Get the yaml file path by filename under `conf/`.
--
-- @function core.profile.yaml_path
-- @tparam self self The profile module itself.
-- @tparam string file_name Name of the yaml file to search.
-- @treturn string The path of the yaml file found.
-- @usage
-- local profile = require("apisix.core.profile")
-- ......
-- -- set the working directory of APISIX
-- profile.apisix_home = env.apisix_home .. "/"
-- local local_conf_path = profile:yaml_path("config")
function _M.yaml_path(self, file_name)
local file_path = self.apisix_home .. "conf/" .. file_name
if self.profile ~= "" and file_name ~= "config-default" then
file_path = file_path .. "-" .. self.profile
end
return file_path .. ".yaml"
end
function _M.customized_yaml_index(self)
return self.apisix_home .. "/conf/.customized_config_path"
end
function _M.customized_yaml_path(self)
local customized_config_index = self:customized_yaml_index()
if util.file_exists(customized_config_index) then
return util.read_file(customized_config_index)
end
return nil
end
return _M

View File

@@ -0,0 +1,238 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Extensible framework to support publish-and-subscribe scenarios
--
-- @module core.pubsub
local log = require("apisix.core.log")
local ws_server = require("resty.websocket.server")
local protoc = require("protoc")
local pb = require("pb")
local ngx = ngx
local setmetatable = setmetatable
local pcall = pcall
local _M = { version = 0.1 }
local mt = { __index = _M }
local pb_state
local function init_pb_state()
-- clear current pb state
local old_pb_state = pb.state(nil)
-- set int64 rule for pubsub module
pb.option("int64_as_string")
-- initialize protoc compiler
protoc.reload()
local pubsub_protoc = protoc.new()
pubsub_protoc:addpath(ngx.config.prefix() .. "apisix/include/apisix/model")
local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto")
if not ok then
pubsub_protoc:reset()
pb.state(old_pb_state)
return "failed to load pubsub protocol: " .. err
end
pb_state = pb.state(old_pb_state)
end
-- parse command name and parameters from client message
local function get_cmd(data)
-- There are sequence and command properties in the data,
-- select the handler according to the command value.
local key = data.req
return key, data[key]
end
-- send generic response to client
local function send_resp(ws, sequence, data)
data.sequence = sequence
-- only restore state if it has changed
if pb_state ~= pb.state() then
pb.state(pb_state)
end
local ok, encoded = pcall(pb.encode, "PubSubResp", data)
if not ok or not encoded then
log.error("failed to encode response message, err: ", encoded)
return
end
local _, err = ws:send_binary(encoded)
if err then
log.error("failed to send response to client, err: ", err)
end
end
-- send error response to client
local function send_error(ws, sequence, err_msg)
return send_resp(ws, sequence, {
error_resp = {
code = 0,
message = err_msg,
},
})
end
---
-- Create pubsub module instance
--
-- @function core.pubsub.new
-- @treturn pubsub module instance
-- @treturn string|nil error message if present
-- @usage
-- local pubsub, err = core.pubsub.new()
function _M.new()
if not pb_state then
local err = init_pb_state()
if err then
return nil, err
end
end
local ws, err = ws_server:new()
if not ws then
return nil, err
end
local obj = setmetatable({
ws_server = ws,
cmd_handler = {},
}, mt)
-- add default ping handler
obj:on("cmd_ping", function (params)
return { pong_resp = params }
end)
return obj
end
---
-- Add command callbacks to pubsub module instances
--
-- The callback function prototype: function (params)
-- The params argument contains the data defined in the requested command.
-- The first return value is the response data, which must contain the fields
-- needed by the particular resp; return nil if an error occurs.
-- The second return value is an error message string; it only needs to be
-- returned when an error occurs.
--
-- @function core.pubsub.on
-- @tparam string command The command to add callback.
-- @tparam func handler The callback function on receipt of command.
-- @usage
-- pubsub:on(command, function (params)
-- return data, err
-- end)
function _M.on(self, command, handler)
self.cmd_handler[command] = handler
end
---
-- Put the pubsub instance into an event loop, waiting to process client commands
--
-- @function core.pubsub.wait
-- @usage
-- local err = pubsub:wait()
function _M.wait(self)
local fatal_err
local ws = self.ws_server
while true do
-- read raw data frames from websocket connection
local raw_data, raw_type, err = ws:recv_frame()
if err then
-- terminate the event loop when a fatal error occurs
if ws.fatal then
fatal_err = err
break
end
-- skip this loop for non-fatal errors
log.error("failed to receive websocket frame: ", err)
goto continue
end
-- handle client close connection
if raw_type == "close" then
break
end
-- the pubsub messages use binary, if the message is not
-- binary, skip this message
if raw_type ~= "binary" then
log.warn("pubsub server receive non-binary data, type: ",
raw_type, ", data: ", raw_data)
goto continue
end
-- only recover state if it has changed
if pb.state() ~= pb_state then
pb.state(pb_state)
end
local data, err = pb.decode("PubSubReq", raw_data)
if not data then
log.error("pubsub server receives undecodable data, err: ", err)
send_error(ws, 0, "wrong command")
goto continue
end
-- command sequence code
local sequence = data.sequence
local cmd, params = get_cmd(data)
if not cmd and not params then
log.warn("pubsub server receives empty command")
goto continue
end
-- find the handler for the current command
local handler = self.cmd_handler[cmd]
if not handler then
log.error("pubsub callback handler not registered for the",
" command, command: ", cmd)
send_error(ws, sequence, "unknown command")
goto continue
end
-- call command handler to generate response data
local resp, err = handler(params)
if not resp then
send_error(ws, sequence, err)
goto continue
end
send_resp(ws, sequence, resp)
::continue::
end
if fatal_err then
log.error("fatal error in pubsub websocket server, err: ", fatal_err)
end
ws:send_close()
end
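-- A minimal end-to-end sketch (illustrative; "cmd_kafka_fetch" stands for any
-- command defined in pubsub.proto):
-- local pubsub, err = core.pubsub.new()
-- pubsub:on("cmd_kafka_fetch", function (params)
--     return fetch_resp, err
-- end)
-- local err = pubsub:wait()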
return _M

View File

@@ -0,0 +1,382 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get or set the information of the client request.
--
-- @module core.request
local lfs = require("lfs")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local io = require("apisix.core.io")
local req_add_header
if ngx.config.subsystem == "http" then
local ngx_req = require "ngx.req"
req_add_header = ngx_req.add_header
end
local is_apisix_or, a6_request = pcall(require, "resty.apisix.request")
local ngx = ngx
local get_headers = ngx.req.get_headers
local clear_header = ngx.req.clear_header
local tonumber = tonumber
local error = error
local type = type
local str_fmt = string.format
local str_lower = string.lower
local req_read_body = ngx.req.read_body
local req_get_body_data = ngx.req.get_body_data
local req_get_body_file = ngx.req.get_body_file
local req_get_post_args = ngx.req.get_post_args
local req_get_uri_args = ngx.req.get_uri_args
local req_set_uri_args = ngx.req.set_uri_args
local table_insert = table.insert
local req_set_header = ngx.req.set_header
local _M = {}
local function _headers(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
if not is_apisix_or then
return get_headers()
end
if a6_request.is_request_header_set() then
a6_request.clear_request_header()
ctx.headers = get_headers()
end
local headers = ctx.headers
if not headers then
headers = get_headers()
ctx.headers = headers
end
return headers
end
local function _validate_header_name(name)
local tname = type(name)
if tname ~= "string" then
return nil, str_fmt("invalid header name %q: got %s, " ..
"expected string", name, tname)
end
return name
end
---
-- Returns all headers of the current request.
-- The header names in the returned table are in lower case.
--
-- @function core.request.headers
-- @tparam table ctx The context of the current request.
-- @treturn table all headers
-- @usage
-- local headers = core.request.headers(ctx)
_M.headers = _headers
---
-- Returns the value of the header with the specified name.
--
-- @function core.request.header
-- @tparam table ctx The context of the current request.
-- @tparam string name The header name, example: "Content-Type".
-- @treturn string|nil the value of the header, or nil if not found.
-- @usage
-- -- You can use upper case for header "Content-Type" here to get the value.
-- local content_type = core.request.header(ctx, "Content-Type") -- "application/json"
function _M.header(ctx, name)
if not ctx then
ctx = ngx.ctx.api_ctx
end
local value = _headers(ctx)[name]
return type(value) == "table" and value[1] or value
end
local function modify_header(ctx, header_name, header_value, override)
if type(ctx) == "string" then
-- It would be simpler to keep compatibility if we put 'ctx'
-- after 'header_value', but the style is too ugly!
header_value = header_name
header_name = ctx
ctx = nil
if override then
log.warn("DEPRECATED: use set_header(ctx, header_name, header_value) instead")
else
log.warn("DEPRECATED: use add_header(ctx, header_name, header_value) instead")
end
end
local err
header_name, err = _validate_header_name(header_name)
if err then
error(err)
end
local changed = false
if is_apisix_or then
changed = a6_request.is_request_header_set()
end
if override then
req_set_header(header_name, header_value)
else
req_add_header(header_name, header_value)
end
if ctx and ctx.var then
-- when the header is updated, clear cache of ctx.var
ctx.var["http_" .. str_lower(header_name)] = nil
end
if is_apisix_or and not changed then
-- if the headers are not changed before,
-- we can only update part of the cache instead of invalidating the whole
a6_request.clear_request_header()
if ctx and ctx.headers then
if override or not ctx.headers[header_name] then
ctx.headers[header_name] = header_value
else
local values = ctx.headers[header_name]
if type(values) == "table" then
table_insert(values, header_value)
else
ctx.headers[header_name] = {values, header_value}
end
end
end
end
end
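---
-- Set (override) or add (append) a request header (docs added for clarity;
-- both delegate to modify_header above).
--
-- @usage
-- -- illustrative sketch:
-- core.request.set_header(ctx, "X-Request-Id", "123")  -- replace any existing value
-- core.request.add_header(ctx, "X-Tag", "a")           -- append another value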
function _M.set_header(ctx, header_name, header_value)
modify_header(ctx, header_name, header_value, true)
end
function _M.add_header(ctx, header_name, header_value)
modify_header(ctx, header_name, header_value, false)
end
-- Return the remote address of the client that connects to APISIX directly.
-- If there is a load balancer between the downstream client and APISIX,
-- this function returns the IP of the load balancer.
function _M.get_ip(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return ctx.var.realip_remote_addr or ctx.var.remote_addr or ''
end
-- Get the remote address of the downstream client, even when there is a
-- load balancer between the downstream client and APISIX.
function _M.get_remote_client_ip(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return ctx.var.remote_addr or ''
end
function _M.get_remote_client_port(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return tonumber(ctx.var.remote_port)
end
function _M.get_uri_args(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
if not ctx.req_uri_args then
-- use 0 to avoid truncated result and keep the behavior as the
-- same as other platforms
local args = req_get_uri_args(0)
ctx.req_uri_args = args
end
return ctx.req_uri_args
end
function _M.set_uri_args(ctx, args)
if not ctx then
ctx = ngx.ctx.api_ctx
end
ctx.req_uri_args = nil
return req_set_uri_args(args)
end
function _M.get_post_args(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
if not ctx.req_post_args then
req_read_body()
-- use 0 to avoid truncated result and keep the behavior as the
-- same as other platforms
local args, err = req_get_post_args(0)
if not args then
-- do we need a way to handle huge post forms?
log.error("the post form is too large: ", err)
args = {}
end
ctx.req_post_args = args
end
return ctx.req_post_args
end
local function check_size(size, max_size)
if max_size and size > max_size then
return nil, "request size " .. size .. " is greater than the "
.. "maximum size " .. max_size .. " allowed"
end
return true
end
local function test_expect(var)
local expect = var.http_expect
return expect and str_lower(expect) == "100-continue"
end
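-- Get the request body, either from memory or from the temp file Nginx
-- buffered it to, optionally rejecting bodies larger than `max_size` bytes
-- (comment added for clarity, summarising the code below).
-- illustrative usage:
-- local body, err = core.request.get_body(10 * 1024, ctx)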
function _M.get_body(max_size, ctx)
if max_size then
local var = ctx and ctx.var or ngx.var
local content_length = tonumber(var.http_content_length)
if content_length then
local ok, err = check_size(content_length, max_size)
if not ok then
-- When client_max_body_size is exceeded, Nginx will set r->expect_tested = 1 to
-- avoid sending the 100 CONTINUE.
-- We use trick below to imitate this behavior.
if test_expect(var) then
clear_header("expect")
end
return nil, err
end
end
end
-- check content-length header for http2/http3
do
local var = ctx and ctx.var or ngx.var
local content_length = tonumber(var.http_content_length)
if (var.server_protocol == "HTTP/2.0" or var.server_protocol == "HTTP/3.0")
and not content_length then
return nil, "HTTP2/HTTP3 request without a Content-Length header"
end
end
req_read_body()
local req_body = req_get_body_data()
if req_body then
local ok, err = check_size(#req_body, max_size)
if not ok then
return nil, err
end
return req_body
end
local file_name = req_get_body_file()
if not file_name then
return nil
end
log.info("attempt to read body from file: ", file_name)
if max_size then
local size, err = lfs.attributes(file_name, "size")
if not size then
return nil, err
end
local ok, err = check_size(size, max_size)
if not ok then
return nil, err
end
end
local req_body, err = io.get_file(file_name)
return req_body, err
end
function _M.get_json_request_body_table()
local body, err = _M.get_body()
if not body then
return nil, { message = "could not get body: " .. (err or "request body is empty") }
end
local body_tab, err = json.decode(body)
if not body_tab then
return nil, { message = "could not get parse JSON request body: " .. err }
end
return body_tab
end
function _M.get_scheme(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return ctx.var.scheme or ''
end
function _M.get_host(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return ctx.var.host or ''
end
function _M.get_port(ctx)
if not ctx then
ctx = ngx.ctx.api_ctx
end
return tonumber(ctx.var.server_port)
end
_M.get_http_version = ngx.req.http_version
_M.get_method = ngx.req.get_method
return _M

View File

@@ -0,0 +1,96 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Domain Resolver.
--
-- @module core.resolver
local json = require("apisix.core.json")
local log = require("apisix.core.log")
local utils = require("apisix.core.utils")
local dns_utils = require("resty.dns.utils")
local config_local = require("apisix.core.config_local")
local HOSTS_IP_MATCH_CACHE = {}
local _M = {}
local function init_hosts_ip()
local hosts, err = dns_utils.parseHosts()
if not hosts then
return hosts, err
end
HOSTS_IP_MATCH_CACHE = hosts
end
function _M.init_resolver(args)
-- initialize /etc/hosts
init_hosts_ip()
local dns_resolver = args and args["dns_resolver"]
utils.set_resolver(dns_resolver)
log.info("dns resolver ", json.delay_encode(dns_resolver, true))
end
---
-- Resolve domain name to ip.
--
-- @function core.resolver.parse_domain
-- @tparam string host Domain name that need to be resolved.
-- @treturn string The IP of the domain name after being resolved.
-- @usage
-- local ip, err = core.resolver.parse_domain("apache.org") -- "198.18.10.114"
function _M.parse_domain(host)
local rev = HOSTS_IP_MATCH_CACHE[host]
local enable_ipv6 = config_local.local_conf().apisix.enable_ipv6
if rev then
-- use ipv4 in high priority
local ip = rev["ipv4"]
if enable_ipv6 and not ip then
ip = rev["ipv6"]
end
if ip then
-- meet test case
log.info("dns resolve ", host, ", result: ", json.delay_encode(ip))
log.info("dns resolver domain: ", host, " to ", ip)
return ip
end
end
local ip_info, err = utils.dns_parse(host)
if not ip_info then
log.error("failed to parse domain: ", host, ", error: ",err)
return nil, err
end
log.info("parse addr: ", json.delay_encode(ip_info))
log.info("resolver: ", json.delay_encode(utils.get_resolver()))
log.info("host: ", host)
if ip_info.address then
log.info("dns resolver domain: ", host, " to ", ip_info.address)
return ip_info.address
end
return nil, "failed to parse domain"
end
return _M

View File

@@ -0,0 +1,231 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Get information from the upstream response, or set information on the client response.
--
-- @module core.response
local encode_json = require("cjson.safe").encode
local ngx = ngx
local arg = ngx.arg
local ngx_print = ngx.print
local ngx_header = ngx.header
local ngx_add_header
if ngx.config.subsystem == "http" then
local ngx_resp = require "ngx.resp"
ngx_add_header = ngx_resp.add_header
end
local error = error
local select = select
local type = type
local ngx_exit = ngx.exit
local concat_tab = table.concat
local str_sub = string.sub
local tonumber = tonumber
local clear_tab = require("table.clear")
local pairs = pairs
local _M = {version = 0.1}
local resp_exit
do
local t = {}
local idx = 1
function resp_exit(code, ...)
clear_tab(t)
idx = 0
if code and type(code) ~= "number" then
idx = idx + 1
t[idx] = code
code = nil
end
if code then
ngx.status = code
end
for i = 1, select('#', ...) do
local v = select(i, ...)
if type(v) == "table" then
local body, err = encode_json(v)
if err then
error("failed to encode data: " .. err, -2)
else
idx = idx + 1
t[idx] = body
idx = idx + 1
t[idx] = "\n"
end
elseif v ~= nil then
idx = idx + 1
t[idx] = v
end
end
if idx > 0 then
ngx_print(t)
end
if code then
return ngx_exit(code)
end
end
end -- do
_M.exit = resp_exit
function _M.say(...)
resp_exit(nil, ...)
end
local function set_header(append, ...)
if ngx.headers_sent then
error("headers have already been sent", 2)
end
local count = select('#', ...)
if count == 1 then
local headers = select(1, ...)
if type(headers) ~= "table" then
-- response.set_header(name, nil)
ngx_header[headers] = nil
return
end
for k, v in pairs(headers) do
if append then
ngx_add_header(k, v)
else
ngx_header[k] = v
end
end
return
end
for i = 1, count, 2 do
if append then
ngx_add_header(select(i, ...), select(i + 1, ...))
else
ngx_header[select(i, ...)] = select(i + 1, ...)
end
end
end
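---
-- Set one or more response headers (docs added for clarity; accepts either
-- name/value pairs or a single table of headers).
--
-- @usage
-- -- illustrative sketch:
-- core.response.set_header("X-A", "1", "X-B", "2")
-- core.response.set_header({["X-A"] = "1", ["X-B"] = "2"})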
function _M.set_header(...)
set_header(false, ...)
end
---
-- Add a header to the client response.
--
-- @function core.response.add_header
-- @usage
-- core.response.add_header("Apisix-Plugins", "no plugin")
function _M.add_header(...)
set_header(true, ...)
end
function _M.get_upstream_status(ctx)
-- $upstream_status may include multiple status codes; we only need the last one
return tonumber(str_sub(ctx.var.upstream_status or "", -3))
end
function _M.clear_header_as_body_modified()
ngx.header.content_length = nil
-- in case of upstream content is compressed content
ngx.header.content_encoding = nil
-- clear cache identifier
ngx.header.last_modified = nil
ngx.header.etag = nil
end
-- Hold body chunks and return the final body once all chunks have been read.
-- Usage:
-- function _M.body_filter(conf, ctx)
-- local final_body = core.response.hold_body_chunk(ctx)
-- if not final_body then
-- return
-- end
-- final_body = transform(final_body)
-- ngx.arg[1] = final_body
-- ...
function _M.hold_body_chunk(ctx, hold_the_copy, max_resp_body_bytes)
local body_buffer
local chunk, eof = arg[1], arg[2]
if not ctx._body_buffer then
ctx._body_buffer = {}
end
if type(chunk) == "string" and chunk ~= "" then
body_buffer = ctx._body_buffer[ctx._plugin_name]
if not body_buffer then
body_buffer = {
chunk,
n = 1
}
ctx._body_buffer[ctx._plugin_name] = body_buffer
ctx._resp_body_bytes = #chunk
else
local n = body_buffer.n + 1
body_buffer.n = n
body_buffer[n] = chunk
ctx._resp_body_bytes = ctx._resp_body_bytes + #chunk
end
if max_resp_body_bytes and ctx._resp_body_bytes >= max_resp_body_bytes then
local body_data = concat_tab(body_buffer, "", 1, body_buffer.n)
body_data = str_sub(body_data, 1, max_resp_body_bytes)
return body_data
end
end
if eof then
body_buffer = ctx._body_buffer[ctx._plugin_name]
if not body_buffer then
if max_resp_body_bytes and #chunk >= max_resp_body_bytes then
chunk = str_sub(chunk, 1, max_resp_body_bytes)
end
return chunk
end
local body_data = concat_tab(body_buffer, "", 1, body_buffer.n)
ctx._body_buffer[ctx._plugin_name] = nil
return body_data
end
if not hold_the_copy then
-- flush the origin body chunk
arg[1] = nil
end
return nil
end
return _M

View File

@@ -0,0 +1,71 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Json schema validation module.
--
-- @module core.schema
local jsonschema = require('jsonschema')
local lrucache = require("apisix.core.lrucache")
local cached_validator = lrucache.new({count = 1000, ttl = 0})
local pcall = pcall
local _M = {
version = 0.3,
TYPE_CONSUMER = 1,
TYPE_METADATA = 2,
}
local function create_validator(schema)
-- local code = jsonschema.generate_validator_code(schema, opts)
-- local file2=io.output("/tmp/2.txt")
-- file2:write(code)
-- file2:close()
local ok, res = pcall(jsonschema.generate_validator, schema)
if ok then
return res
end
return nil, res -- error message
end
local function get_validator(schema)
local validator, err = cached_validator(schema, nil,
create_validator, schema)
if not validator then
return nil, err
end
return validator, nil
end
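---
-- Validate a Lua table against a JSON schema; compiled validators are cached
-- in an LRU cache keyed by the schema table (docs added for clarity).
--
-- @usage
-- -- illustrative sketch:
-- local ok, err = core.schema.check({type = "object"}, conf)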
function _M.check(schema, json)
local validator, err = get_validator(schema)
if not validator then
return false, err
end
return validator(json)
end
_M.valid = get_validator
return _M

View File

@@ -0,0 +1,136 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped string module.
--
-- @module core.string
local error = error
local type = type
local str_byte = string.byte
local str_find = string.find
local ffi = require("ffi")
local C = ffi.C
local ffi_cast = ffi.cast
local ngx = ngx
local ngx_decode_args = ngx.decode_args
local ngx_encode_args = ngx.encode_args
ffi.cdef[[
int memcmp(const void *s1, const void *s2, size_t n);
]]
local _M = {
version = 0.1,
}
setmetatable(_M, {__index = string})
-- find a needle in a haystack using plain-text matching
-- note: make sure the haystack is of 'string' type, otherwise an exception will be thrown
function _M.find(haystack, needle, from)
return str_find(haystack, needle, from or 1, true)
end
---
-- Tests whether the string s begins with prefix.
--
-- @function core.string.has_prefix
-- @tparam string s The string being tested.
-- @tparam string prefix Specify the prefix.
-- @treturn boolean Test result, true means the string s begins with prefix.
-- @usage
-- local res = core.string.has_prefix("/apisix/admin/routes", "/apisix/") -- true
function _M.has_prefix(s, prefix)
if type(s) ~= "string" or type(prefix) ~= "string" then
error("unexpected type: s:" .. type(s) .. ", prefix:" .. type(prefix))
end
if #s < #prefix then
return false
end
local rc = C.memcmp(s, prefix, #prefix)
return rc == 0
end
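---
-- Tests whether the string s ends with suffix (docs added for clarity,
-- mirroring has_prefix above).
--
-- @usage
-- local res = core.string.has_suffix("/apisix/admin/routes", "/routes") -- true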
function _M.has_suffix(s, suffix)
if type(s) ~= "string" or type(suffix) ~= "string" then
error("unexpected type: s:" .. type(s) .. ", suffix:" .. type(suffix))
end
if #s < #suffix then
return false
end
local rc = C.memcmp(ffi_cast("char *", s) + #s - #suffix, suffix, #suffix)
return rc == 0
end
function _M.rfind_char(s, ch, idx)
local b = str_byte(ch)
for i = idx or #s, 1, -1 do
if str_byte(s, i, i) == b then
return i
end
end
return nil
end
-- reduce network consumption by compressing string indentation
-- this method should be used with caution
-- it will remove the spaces at the beginning of each line
-- and remove the spaces after `,` character
function _M.compress_script(s)
s = ngx.re.gsub(s, [[^\s+]], "", "mjo")
s = ngx.re.gsub(s, [[,\s+]], ",", "mjo")
return s
end
---
-- Decodes a URI encoded query-string into a Lua table.
-- All request arguments received will be decoded by default.
--
-- @function core.string.decode_args
-- @tparam string args A URI encoded query-string.
-- @treturn table the value of decoded query-string.
-- @usage
-- local args, err = core.string.decode_args("a=1&b=2") -- {a=1, b=2}
function _M.decode_args(args)
-- use 0 to avoid truncated result and keep the behavior as the
-- same as other platforms
return ngx_decode_args(args, 0)
end
---
-- Encode the Lua table to a query args string according to the URI encoded rules.
--
-- @function core.string.encode_args
-- @tparam table args The query args Lua table.
-- @treturn string the value of query args string.
-- @usage
-- local str = core.string.encode_args({a=1, b=2}) -- "a=1&b=2"
function _M.encode_args(args)
return ngx_encode_args(args)
end
return _M

View File

@@ -0,0 +1,287 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped table module.
--
-- @module core.table
local newproxy = newproxy
local getmetatable = getmetatable
local setmetatable = setmetatable
local select = select
local tostring = tostring
local new_tab = require("table.new")
local nkeys = require("table.nkeys")
local ipairs = ipairs
local pairs = pairs
local type = type
local ngx_re = require("ngx.re")
local _M = {
version = 0.2,
new = new_tab,
clear = require("table.clear"),
nkeys = nkeys,
insert = table.insert,
concat = table.concat,
sort = table.sort,
clone = require("table.clone"),
isarray = require("table.isarray"),
isempty = require("table.isempty"),
}
setmetatable(_M, {__index = table})
function _M.insert_tail(tab, ...)
local idx = #tab
for i = 1, select('#', ...) do
idx = idx + 1
tab[idx] = select(i, ...)
end
return idx
end
function _M.set(tab, ...)
for i = 1, select('#', ...) do
tab[i] = select(i, ...)
end
end
function _M.try_read_attr(tab, ...)
local count = select('#', ...)
for i = 1, count do
local attr = select(i, ...)
if type(tab) ~= "table" then
return nil
end
tab = tab[attr]
end
return tab
end
---
-- Test if an element exists in an array.
--
-- @function core.table.array_find
-- @tparam table array The tested array.
-- @tparam string val The tested value.
-- @treturn number The index of tested value.
-- @usage
-- local arr = {"a", "b", "c"}
-- local idx = core.table.array_find(arr, "b") -- idx = 2
local function array_find(array, val)
if type(array) ~= "table" then
return nil
end
for i, v in ipairs(array) do
if v == val then
return i
end
end
return nil
end
_M.array_find = array_find
-- only work under lua51 or luajit
function _M.setmt__gc(t, mt)
local prox = newproxy(true)
getmetatable(prox).__gc = function() mt.__gc(t) end
t[prox] = true
return setmetatable(t, mt)
end
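---
-- Deep-copy a table, handling self-references; key paths listed in
-- `opts.shallows` (rooted at "self") are copied by reference instead
-- (docs added for clarity, based on the implementation below).
--
-- @usage
-- -- illustrative sketch:
-- local copy = core.table.deepcopy(conf)
-- -- keep conf.upstream.parent shared instead of copied:
-- local copy2 = core.table.deepcopy(conf, {shallows = {"self.upstream.parent"}})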
local deepcopy
do
local function _deepcopy(orig, copied, parent, opts)
-- If the array-like table contains nil in the middle,
-- the length might be smaller than expected,
-- but it doesn't affect the correctness.
local len = #orig
local copy = new_tab(len, nkeys(orig) - len)
-- prevent infinite loop when a field refers its parent
copied[orig] = copy
for orig_key, orig_value in pairs(orig) do
local path = parent .. "." .. tostring(orig_key)
if opts and array_find(opts.shallows, path) then
copy[orig_key] = orig_value
else
if type(orig_value) == "table" then
if copied[orig_value] then
copy[orig_key] = copied[orig_value]
else
copy[orig_key] = _deepcopy(orig_value, copied, path, opts)
end
else
copy[orig_key] = orig_value
end
end
end
local mt = getmetatable(orig)
if mt ~= nil then
setmetatable(copy, mt)
end
return copy
end
local copied_recorder = {}
function deepcopy(orig, opts)
local orig_type = type(orig)
if orig_type ~= 'table' then
return orig
end
local res = _deepcopy(orig, copied_recorder, "self", opts)
_M.clear(copied_recorder)
return res
end
end
_M.deepcopy = deepcopy
local ngx_null = ngx.null
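-- Recursively merge `extend` into `origin`: map-like sub-tables are merged,
-- array-like sub-tables are replaced wholesale, and ngx.null removes a key
-- (comment added for clarity).
-- illustrative usage:
-- local t = core.table.merge({a = {x = 1}}, {a = {y = 2}})  -- {a = {x = 1, y = 2}}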
local function merge(origin, extend)
for k,v in pairs(extend) do
if type(v) == "table" then
if type(origin[k] or false) == "table" then
if _M.nkeys(origin[k]) ~= #origin[k] then
merge(origin[k] or {}, extend[k] or {})
else
origin[k] = v
end
else
origin[k] = v
end
elseif v == ngx_null then
origin[k] = nil
else
origin[k] = v
end
end
return origin
end
_M.merge = merge
local function patch(node_value, sub_path, conf)
local sub_value = node_value
local sub_paths = ngx_re.split(sub_path, "/")
for i = 1, #sub_paths - 1 do
local sub_name = sub_paths[i]
if sub_value[sub_name] == nil then
sub_value[sub_name] = {}
end
sub_value = sub_value[sub_name]
if type(sub_value) ~= "table" then
return 400, "invalid sub-path: /"
.. _M.concat(sub_paths, 1, i)
end
end
if type(sub_value) ~= "table" then
return 400, "invalid sub-path: /" .. sub_path
end
local sub_name = sub_paths[#sub_paths]
if sub_name and sub_name ~= "" then
sub_value[sub_name] = conf
else
node_value = conf
end
return nil, nil, node_value
end
_M.patch = patch
-- Compare two tables as if they are sets (only compare the key part)
function _M.set_eq(a, b)
if nkeys(a) ~= nkeys(b) then
return false
end
for k in pairs(a) do
if b[k] == nil then
return false
end
end
return true
end
-- Compare two elements, including their descendants
local function deep_eq(a, b)
local type_a = type(a)
local type_b = type(b)
if type_a ~= 'table' or type_b ~= 'table' then
return a == b
end
local n_a = nkeys(a)
local n_b = nkeys(b)
if n_a ~= n_b then
return false
end
for k, v_a in pairs(a) do
local v_b = b[k]
local eq = deep_eq(v_a, v_b)
if not eq then
return false
end
end
return true
end
_M.deep_eq = deep_eq
-- pick takes the given attributes out of object
function _M.pick(obj, attrs)
local data = {}
for k, v in pairs(obj) do
if attrs[k] ~= nil then
data[k] = v
end
end
return data
end
return _M

View File

@@ -0,0 +1,108 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Wrapped timer module that can cancel running timers.
--
-- @module core.timer
local log = require("apisix.core.log")
local sleep = require("apisix.core.utils").sleep
local timer_every = ngx.timer.every
local timer_at = ngx.timer.at
local update_time = ngx.update_time
local now = ngx.now
local pcall = pcall
local _M = {
version = 0.1,
}
local function _internal(timer)
timer.start_time = now()
repeat
local ok, err = pcall(timer.callback_fun)
if not ok then
log.error("failed to run the timer: ", timer.name, " err: ", err)
if timer.sleep_fail > 0 then
sleep(timer.sleep_fail)
end
elseif timer.sleep_succ > 0 then
sleep(timer.sleep_succ)
end
update_time()
until timer.each_ttl <= 0 or now() >= timer.start_time + timer.each_ttl
end
local function run_timer(premature, self)
if self.running or premature then
return
end
self.running = true
local ok, err = pcall(_internal, self)
if not ok then
log.error("failed to run timer[", self.name, "] err: ", err)
end
self.running = false
end
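---
-- Create a named timer that repeatedly runs `callback_fun`
-- (docs added for clarity; see the opts defaults below; `report_status`
-- is a hypothetical callback).
--
-- @usage
-- -- illustrative sketch:
-- local timer, err = core.timer.new("heartbeat", report_status,
--                                   {check_interval = 2, sleep_succ = 1})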
function _M.new(name, callback_fun, opts)
if not name then
return nil, "missing argument: name"
end
if not callback_fun then
return nil, "missing argument: callback_fun"
end
opts = opts or {}
local timer = {
name = name,
each_ttl = opts.each_ttl or 1,
sleep_succ = opts.sleep_succ or 1,
sleep_fail = opts.sleep_fail or 5,
start_time = 0,
callback_fun = callback_fun,
running = false,
}
local hdl, err = timer_every(opts.check_interval or 1,
run_timer, timer)
if not hdl then
return nil, err
end
hdl, err = timer_at(0, run_timer, timer)
if not hdl then
return nil, err
end
return timer
end
return _M

View File

@@ -0,0 +1,465 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Collection of util functions.
--
-- @module core.utils
local config_local = require("apisix.core.config_local")
local core_str = require("apisix.core.string")
local rfind_char = core_str.rfind_char
local table = require("apisix.core.table")
local log = require("apisix.core.log")
local string = require("apisix.core.string")
local dns_client = require("apisix.core.dns.client")
local ngx_re = require("ngx.re")
local ipmatcher = require("resty.ipmatcher")
local ffi = require("ffi")
local base = require("resty.core.base")
local open = io.open
local sub_str = string.sub
local str_byte = string.byte
local tonumber = tonumber
local tostring = tostring
local re_gsub = ngx.re.gsub
local re_match = ngx.re.match
local re_gmatch = ngx.re.gmatch
local type = type
local io_popen = io.popen
local C = ffi.C
local ffi_string = ffi.string
local get_string_buf = base.get_string_buf
local exiting = ngx.worker.exiting
local ngx_sleep = ngx.sleep
local ipairs = ipairs
local hostname
local dns_resolvers
local current_inited_resolvers
local current_dns_client
local max_sleep_interval = 1
ffi.cdef[[
int ngx_escape_uri(char *dst, const char *src,
size_t size, int type);
]]
local _M = {
version = 0.2,
parse_ipv4 = ipmatcher.parse_ipv4,
parse_ipv6 = ipmatcher.parse_ipv6,
}
function _M.get_seed_from_urandom()
local frandom, err = open("/dev/urandom", "rb")
if not frandom then
return nil, 'failed to open /dev/urandom: ' .. err
end
local str = frandom:read(8)
frandom:close()
if not str then
return nil, 'failed to read data from /dev/urandom'
end
local seed = 0
for i = 1, 8 do
seed = 256 * seed + str:byte(i)
end
return seed
end
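-- split a URI on "/"; note the leading empty segment,
-- e.g. split_uri("/a/b") -> {"", "a", "b"} (illustrative)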
function _M.split_uri(uri)
return ngx_re.split(uri, "/")
end
local function dns_parse(domain, selector)
if dns_resolvers ~= current_inited_resolvers then
local local_conf = config_local.local_conf()
local valid = table.try_read_attr(local_conf, "apisix", "dns_resolver_valid")
local enable_resolv_search_opt = table.try_read_attr(local_conf, "apisix",
"enable_resolv_search_opt")
local opts = {
nameservers = table.clone(dns_resolvers),
order = {"last", "A", "AAAA", "CNAME"}, -- avoid querying SRV
}
opts.validTtl = valid
if not enable_resolv_search_opt then
opts.search = {}
end
local client, err = dns_client.new(opts)
if not client then
return nil, "failed to init the dns client: " .. err
end
current_dns_client = client
current_inited_resolvers = dns_resolvers
end
return current_dns_client:resolve(domain, selector)
end
_M.dns_parse = dns_parse
local function set_resolver(resolvers)
dns_resolvers = resolvers
end
_M.set_resolver = set_resolver
function _M.get_resolver()
return dns_resolvers
end
local function _parse_ipv4_or_host(addr)
local pos = rfind_char(addr, ":", #addr - 1)
if not pos then
return addr, nil
end
local host = sub_str(addr, 1, pos - 1)
local port = sub_str(addr, pos + 1)
return host, tonumber(port)
end
local function _parse_ipv6_without_port(addr)
return addr
end
-- parse_addr parses 'addr' into its host and port parts. If 'addr' doesn't
-- carry a port, nil is returned as the port.
-- For an IPv6 literal host with brackets, like [::1], the square brackets are kept.
-- For a malformed 'addr', the returned values can be anything. This method doesn't
-- validate whether the input is valid.
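-- e.g. (illustrative)
-- parse_addr("127.0.0.1:80") -> "127.0.0.1", 80
-- parse_addr("[::1]:8080") -> "[::1]", 8080
-- parse_addr("example.com") -> "example.com", nil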
function _M.parse_addr(addr)
if str_byte(addr, 1) == str_byte("[") then
-- IPv6 format, with brackets, maybe with port
local right_bracket = str_byte("]")
local len = #addr
if str_byte(addr, len) == right_bracket then
-- addr in [ip:v6] format
return addr, nil
else
local pos = rfind_char(addr, ":", #addr - 1)
if not pos or str_byte(addr, pos - 1) ~= right_bracket then
-- malformed addr
return addr, nil
end
-- addr in [ip:v6]:port format
local host = sub_str(addr, 1, pos - 1)
local port = sub_str(addr, pos + 1)
return host, tonumber(port)
end
else
-- When we reach here, the input can be:
-- 1. IPv4
-- 2. IPv4, with port
-- 3. IPv6, like "2001:db8::68" or "::ffff:192.0.2.1"
-- 4. Malformed input
-- 5. Host, like "test.com" or "localhost"
-- 6. Host with port
local colon = str_byte(":")
local colon_counter = 0
local dot = str_byte(".")
for i = 1, #addr do
local ch = str_byte(addr, i, i)
if ch == dot then
return _parse_ipv4_or_host(addr)
elseif ch == colon then
colon_counter = colon_counter + 1
if colon_counter == 2 then
return _parse_ipv6_without_port(addr)
end
end
end
return _parse_ipv4_or_host(addr)
end
end
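-- escape a URI with nginx's ngx_escape_uri (escape type 0);
-- e.g. uri_safe_encode("a b") -> "a%20b" (illustrative)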
function _M.uri_safe_encode(uri)
local count_escaped = C.ngx_escape_uri(nil, uri, #uri, 0)
local len = #uri + 2 * count_escaped
local buf = get_string_buf(len)
C.ngx_escape_uri(buf, uri, #uri, 0)
return ffi_string(buf, len)
end
function _M.validate_header_field(field)
for i = 1, #field do
local b = str_byte(field, i, i)
-- '!' - '~', excluding ':'
if not (32 < b and b < 127) or b == 58 then
return false
end
end
return true
end
function _M.validate_header_value(value)
if type(value) ~= "string" then
return true
end
for i = 1, #value do
local b = str_byte(value, i, i)
-- control characters
if b < 32 or b >= 127 then
return false
end
end
return true
end
---
-- Returns the standard host name of the local host.
-- Only use this method in the init/init_worker phase.
--
-- @function core.utils.gethostname
-- @treturn string The host name of the local host.
-- @usage
-- local hostname = core.utils.gethostname() -- "localhost"
function _M.gethostname()
if hostname then
return hostname
end
local hd, perr = io_popen("/bin/hostname")
if not hd then
log.error("failed to run \"/bin/hostname\": ", perr)
hostname = "unknown"
return hostname
end
local data, err = hd:read("*a")
hd:close()
if err == nil then
hostname = data
if string.has_suffix(hostname, "\r\n") then
hostname = sub_str(hostname, 1, -3)
elseif string.has_suffix(hostname, "\n") then
hostname = sub_str(hostname, 1, -2)
end
else
hostname = "unknown"
log.error("failed to read output of \"/bin/hostname\": ", err)
end
return hostname
end
local function sleep(sec)
if sec <= max_sleep_interval then
return ngx_sleep(sec)
end
ngx_sleep(max_sleep_interval)
if exiting() then
return
end
sec = sec - max_sleep_interval
return sleep(sec)
end
_M.sleep = sleep
local resolve_var
do
local _ctx
local n_resolved
local pat = [[(?<!\\)\$(\{(\w+)\}|(\w+))]]
local _escaper
local function resolve(m)
local variable = m[2] or m[3]
local v = _ctx[variable]
if v == nil then
return ""
end
n_resolved = n_resolved + 1
if _escaper then
return _escaper(tostring(v))
end
return tostring(v)
end
function resolve_var(tpl, ctx, escaper)
n_resolved = 0
if not tpl then
return tpl, nil, n_resolved
end
local from = core_str.find(tpl, "$")
if not from then
return tpl, nil, n_resolved
end
-- avoid creating temporary function
_ctx = ctx
_escaper = escaper
local res, _, err = re_gsub(tpl, pat, resolve, "jo")
_ctx = nil
_escaper = nil
if not res then
return nil, err
end
return res, nil, n_resolved
end
end
-- Resolve ngx.var in the given string
_M.resolve_var = resolve_var
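-- e.g. (illustrative)
-- resolve_var("host: $host", {host = "apisix"}) -> "host: apisix", nil, 1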
local resolve_var_with_captures
do
local _captures
-- escaping is not supported very well, e.g. a redundant '\' is left behind after escaping "$1"
local pat = [[ (?<! \\) \$ \{? (\d+) \}? ]]
local function resolve(m)
local v = _captures[tonumber(m[1])]
if not v then
v = ""
end
return v
end
-- captures is the match result of regex uri in proxy-rewrite plugin
function resolve_var_with_captures(tpl, captures)
if not tpl then
return tpl, nil
end
local from = core_str.find(tpl, "$")
if not from then
return tpl, nil
end
captures = captures or {}
_captures = captures
local res, _, err = re_gsub(tpl, pat, resolve, "jox")
_captures = nil
if not res then
return nil, err
end
return res, nil
end
end
-- Resolve {$1, $2, ...} in the given string
_M.resolve_var_with_captures = resolve_var_with_captures
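-- e.g. (illustrative)
-- resolve_var_with_captures("/$1/${2}", {"a", "b"}) -> "/a/b", nil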
-- if `str` is a dot-separated path like `some_plugin.some_field.nested_field`,
-- return the table that holds `nested_field` at its top level, plus that field name;
-- otherwise return the original table `conf` and `field` unchanged
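-- e.g. get_root_conf("a.b.c", conf) -> conf.a.b, "c" (illustrative)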
local function get_root_conf(str, conf, field)
-- if the string contains periods, get the splits in `it` iterator
local it, _ = re_gmatch(str, [[([^\.]+)]])
if not it then
return conf, field
end
-- add the splits into a table
local matches = {}
while true do
local m, _ = it()
if not m then
break
end
table.insert(matches, m[0])
end
-- get to the table that holds the last field
local num_of_matches = #matches
for i = 1, num_of_matches - 1 , 1 do
conf = conf[matches[i]]
end
-- return the table and the last field
return conf, matches[num_of_matches]
end
local function find_and_log(field, plugin_name, value)
local match, err = re_match(value, "^https")
if not match and not err then
log.warn("Using ", plugin_name, " " , field, " with no TLS is a security risk")
end
end
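-- walk the given dot-separated conf fields and warn when a value
-- doesn't start with "https"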
function _M.check_https(fields, conf, plugin_name)
for _, field in ipairs(fields) do
local new_conf, new_field = get_root_conf(field, conf)
if not new_conf then
return
end
local value = new_conf[new_field]
if not value then
return
end
if type(value) == "table" then
for _, v in ipairs(value) do
find_and_log(field, plugin_name, v)
end
else
find_and_log(field, plugin_name, value)
end
end
end
function _M.check_tls_bool(fields, conf, plugin_name)
for i, field in ipairs(fields) do
local new_conf, new_field = get_root_conf(field, conf)
if not new_conf then
return
end
local value = new_conf[new_field]
if value ~= true and value ~= nil then
log.warn("Keeping ", field, " disabled in ",
plugin_name, " configuration is a security risk")
end
end
end
return _M

View File

@@ -0,0 +1,24 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--- Return APISIX current version.
--
-- @module core.version
return {
VERSION = "3.13.0"
}

View File

@@ -0,0 +1,315 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local yaml = require("lyaml")
local log = require("apisix.core.log")
local profile = require("apisix.core.profile")
local lfs = require("lfs")
local inspect = require("inspect")
local jsonschema = require("jsonschema")
local io = io
local ngx = ngx
local re_find = ngx.re.find
local get_headers = ngx.req.get_headers
local type = type
local pairs = pairs
local setmetatable = setmetatable
local pcall = pcall
local ipairs = ipairs
local unpack = unpack
local debug_yaml_path = profile:yaml_path("debug")
local debug_yaml
local debug_yaml_ctime
local _M = {version = 0.1}
local config_schema = {
type = "object",
properties = {
basic = {
properties = {
enable = {
type = "boolean",
},
}
},
http_filter = {
properties = {
enable = {
type = "boolean",
},
enable_header_name = {
type = "string",
},
}
},
hook_conf = {
properties = {
enable = {
type = "boolean",
},
name = {
type = "string",
},
log_level = {
enum = {"debug", "info", "notice", "warn", "error",
"crit", "alert","emerg"},
},
is_print_input_args = {
type = "boolean",
},
is_print_return_value = {
type = "boolean",
},
}
},
},
required = {"basic", "http_filter", "hook_conf"},
}
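-- a debug.yaml sketch matching config_schema above (the header and hook
-- names are illustrative):
--
-- basic:
--   enable: true
-- http_filter:
--   enable: false
--   enable_header_name: X-APISIX-Dynamic-Debug
-- hook_conf:
--   enable: false
--   name: hook_phase
--   log_level: warn
--   is_print_input_args: true
--   is_print_return_value: false
-- #END  (required end flag, see read_debug_yaml below)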
local function read_debug_yaml()
local attributes, err = lfs.attributes(debug_yaml_path)
if not attributes then
log.notice("failed to fetch ", debug_yaml_path, " attributes: ", err)
return
end
-- log.info("change: ", json.encode(attributes))
local last_change_time = attributes.change
if debug_yaml_ctime == last_change_time then
return
end
local f, err = io.open(debug_yaml_path, "r")
if not f then
log.error("failed to open file ", debug_yaml_path, " : ", err)
return
end
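-- scan the trailing bytes for the "#END" marker so that a partially
-- written file is skipped until the write completes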
local found_end_flag
for i = 1, 10 do
f:seek('end', -i)
local end_flag = f:read("*a")
-- log.info(i, " flag: ", end_flag)
if re_find(end_flag, [[#END\s*]], "jo") then
found_end_flag = true
break
end
end
if not found_end_flag then
f:seek("set")
local size = f:seek("end")
f:close()
if size > 8 then
log.warn("missing valid end flag in file ", debug_yaml_path)
end
return
end
f:seek('set')
local yaml_config = f:read("*a")
f:close()
local debug_yaml_new = yaml.load(yaml_config)
if not debug_yaml_new then
log.error("failed to parse the content of file " .. debug_yaml_path)
return
end
debug_yaml_new.hooks = debug_yaml_new.hooks or {}
debug_yaml = debug_yaml_new
debug_yaml_ctime = last_change_time
-- validate the debug yaml config
local validator = jsonschema.generate_validator(config_schema)
local ok, err = validator(debug_yaml)
if not ok then
log.error("failed to validate debug config " .. err)
return
end
return true
end
local sync_debug_hooks
do
local pre_mtime
local enabled_hooks = {}
local function apply_new_fun(module, fun_name, file_path, hook_conf)
local log_level = hook_conf.log_level or "warn"
if not module or type(module[fun_name]) ~= "function" then
log.error("failed to find function [", fun_name,
"] in module:", file_path)
return
end
local fun = module[fun_name]
local fun_org
if enabled_hooks[fun] then
fun_org = enabled_hooks[fun].org
enabled_hooks[fun] = nil
else
fun_org = fun
end
local t = {fun_org = fun_org}
local mt = {}
function mt.__call(self, ...)
local arg = {...}
local http_filter = debug_yaml.http_filter
local api_ctx = ngx.ctx.api_ctx
local enable_by_hook = not (http_filter and http_filter.enable)
local enable_by_header_filter = (http_filter and http_filter.enable)
and (api_ctx and api_ctx.enable_dynamic_debug)
if hook_conf.is_print_input_args then
if enable_by_hook or enable_by_header_filter then
log[log_level]("call require(\"", file_path, "\").", fun_name,
"() args:", inspect(arg))
end
end
local ret = {self.fun_org(...)}
if hook_conf.is_print_return_value then
if enable_by_hook or enable_by_header_filter then
log[log_level]("call require(\"", file_path, "\").", fun_name,
"() return:", inspect(ret))
end
end
return unpack(ret)
end
setmetatable(t, mt)
enabled_hooks[t] = {
org = fun_org, new = t, mod = module,
fun_name = fun_name
}
module[fun_name] = t
end
function sync_debug_hooks()
if not debug_yaml_ctime or debug_yaml_ctime == pre_mtime then
return
end
for _, hook in pairs(enabled_hooks) do
local m = hook.mod
local name = hook.fun_name
m[name] = hook.org
end
enabled_hooks = {}
local hook_conf = debug_yaml.hook_conf
if not hook_conf.enable then
pre_mtime = debug_yaml_ctime
return
end
local hook_name = hook_conf.name or ""
local hooks = debug_yaml[hook_name]
if not hooks then
pre_mtime = debug_yaml_ctime
return
end
for file_path, fun_names in pairs(hooks) do
local ok, module = pcall(require, file_path)
if not ok then
log.error("failed to load module [", file_path, "]: ", module)
else
for _, fun_name in ipairs(fun_names) do
apply_new_fun(module, fun_name, file_path, hook_conf)
end
end
end
pre_mtime = debug_yaml_ctime
end
end --do
local function sync_debug_status(premature)
if premature then
return
end
if not read_debug_yaml() then
return
end
sync_debug_hooks()
end
local function check()
if not debug_yaml or not debug_yaml.http_filter then
return false
end
local http_filter = debug_yaml.http_filter
if not http_filter or not http_filter.enable_header_name or not http_filter.enable then
return false
end
return true
end
function _M.dynamic_debug(api_ctx)
if not check() then
return
end
if get_headers()[debug_yaml.http_filter.enable_header_name] then
api_ctx.enable_dynamic_debug = true
end
end
function _M.enable_debug()
if not debug_yaml or not debug_yaml.basic then
return false
end
return debug_yaml.basic.enable
end
function _M.init_worker()
local process = require("ngx.process")
if process.type() ~= "worker" then
return
end
sync_debug_status()
ngx.timer.every(1, sync_debug_status)
end
return _M

View File

@@ -0,0 +1,691 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local core_sleep = require("apisix.core.utils").sleep
local resty_consul = require('resty.consul')
local http = require('resty.http')
local util = require("apisix.cli.util")
local ipairs = ipairs
local error = error
local ngx = ngx
local unpack = unpack
local tonumber = tonumber
local pairs = pairs
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local log = core.log
local json_delay_encode = core.json.delay_encode
local ngx_worker_id = ngx.worker.id
local exiting = ngx.worker.exiting
local thread_spawn = ngx.thread.spawn
local thread_wait = ngx.thread.wait
local thread_kill = ngx.thread.kill
local math_random = math.random
local pcall = pcall
local null = ngx.null
local type = type
local next = next
local all_services = core.table.new(0, 5)
local default_service
local default_weight
local sort_type
local skip_service_map = core.table.new(0, 1)
local dump_params
local events
local events_list
local consul_services
local default_skip_services = {"consul"}
local default_random_range = 5
local default_catalog_error_index = -1
local default_health_error_index = -2
local watch_type_catalog = 1
local watch_type_health = 2
local max_retry_time = 256
local _M = {
version = 0.3,
}
local function discovery_consul_callback(data, event, source, pid)
all_services = data
log.notice("update local variable all_services, event is: ", event,
"source: ", source, "server pid:", pid,
", all services: ", json_delay_encode(all_services, true))
end
function _M.all_nodes()
return all_services
end
function _M.nodes(service_name)
if not all_services then
log.error("all_services is nil, failed to fetch nodes for : ", service_name)
return
end
local resp_list = all_services[service_name]
if not resp_list then
log.error("fetch nodes failed by ", service_name, ", return default service")
return default_service and {default_service}
end
log.info("process id: ", ngx_worker_id(), ", all_services[", service_name, "] = ",
json_delay_encode(resp_list, true))
return resp_list
end
local function update_all_services(consul_server_url, up_services)
-- clean old unused data
local old_services = consul_services[consul_server_url] or {}
for k, _ in pairs(old_services) do
all_services[k] = nil
end
core.table.clear(old_services)
for k, v in pairs(up_services) do
all_services[k] = v
end
consul_services[consul_server_url] = up_services
log.info("update all services: ", json_delay_encode(all_services, true))
end
local function read_dump_services()
local data, err = util.read_file(dump_params.path)
if not data then
log.error("read dump file get error: ", err)
return
end
log.info("read dump file: ", data)
data = util.trim(data)
if #data == 0 then
log.error("dump file is empty")
return
end
local entity, err = core.json.decode(data)
if not entity then
log.error("decoded dump data got error: ", err, ", file content: ", data)
return
end
if not entity.services or not entity.last_update then
log.warn("decoded dump data miss fields, file content: ", data)
return
end
local now_time = ngx.time()
log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ",
dump_params.expire, ", now_time: ", now_time)
if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
log.warn("dump file: ", dump_params.path, " had expired, ignored it")
return
end
all_services = entity.services
log.info("load dump file into memory success")
end
local function write_dump_services()
local entity = {
services = all_services,
last_update = ngx.time(),
expire = dump_params.expire, -- to be handled later
}
local data = core.json.encode(entity)
local succ, err = util.write_file(dump_params.path, data)
if not succ then
log.error("write dump into file got error: ", err)
end
end
local function show_dump_file()
if not dump_params then
return 503, "dump params is nil"
end
local data, err = util.read_file(dump_params.path)
if not data then
return 503, err
end
return 200, data
end
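-- exponential backoff for reconnects: 1, 4, 16, ... seconds,
-- wrapping back to 1 once the delay reaches max_retry_time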
local function get_retry_delay(retry_delay)
if not retry_delay or retry_delay >= max_retry_time then
retry_delay = 1
else
retry_delay = retry_delay * 4
end
return retry_delay
end
local function get_opts(consul_server, is_catalog)
local opts = {
host = consul_server.host,
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
default_args = {
token = consul_server.token,
}
}
if not consul_server.keepalive then
return opts
end
opts.default_args.wait = consul_server.wait_timeout -- wait ~= 0 blocks; wait = 0 returns immediately
if is_catalog then
opts.default_args.index = consul_server.catalog_index
else
opts.default_args.index = consul_server.health_index
end
return opts
end
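-- long-poll consul: with keepalive, the last X-Consul-Index is passed as the
-- blocking `index` argument, so a response with an unchanged index means
-- "no change yet" and the watcher simply retries after a short random delay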
local function watch_catalog(consul_server)
local client = resty_consul:new(get_opts(consul_server, true))
::RETRY::
local watch_result, watch_err = client:get(consul_server.consul_watch_catalog_url)
local watch_error_info = (watch_err ~= nil and watch_err)
or ((watch_result ~= nil and watch_result.status ~= 200)
and watch_result.status)
if watch_error_info then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_catalog_url,
", got watch result: ", json_delay_encode(watch_result),
", with error: ", watch_error_info)
return watch_type_catalog, default_catalog_error_index
end
if consul_server.catalog_index > 0
and consul_server.catalog_index == tonumber(watch_result.headers['X-Consul-Index']) then
local random_delay = math_random(default_random_range)
log.info("watch catalog has no change, re-watch consul after ", random_delay, " seconds")
core_sleep(random_delay)
goto RETRY
end
return watch_type_catalog, watch_result.headers['X-Consul-Index']
end
local function watch_health(consul_server)
local client = resty_consul:new(get_opts(consul_server, false))
::RETRY::
local watch_result, watch_err = client:get(consul_server.consul_watch_health_url)
local watch_error_info = (watch_err ~= nil and watch_err)
or ((watch_result ~= nil and watch_result.status ~= 200)
and watch_result.status)
if watch_error_info then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_health_url,
", got watch result: ", json_delay_encode(watch_result),
", with error: ", watch_error_info)
return watch_type_health, default_health_error_index
end
if consul_server.health_index > 0
and consul_server.health_index == tonumber(watch_result.headers['X-Consul-Index']) then
local random_delay = math_random(default_random_range)
log.info("watch health has no change, re-watch consul after ", random_delay, " seconds")
core_sleep(random_delay)
goto RETRY
end
return watch_type_health, watch_result.headers['X-Consul-Index']
end
local function check_keepalive(consul_server, retry_delay)
if consul_server.keepalive and not exiting() then
local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
if not ok then
log.error("create ngx_timer_at got error: ", err)
return
end
end
end
local function update_index(consul_server, catalog_index, health_index)
local c_index = 0
local h_index = 0
if catalog_index ~= nil then
c_index = tonumber(catalog_index)
end
if health_index ~= nil then
h_index = tonumber(health_index)
end
if c_index > 0 then
consul_server.catalog_index = c_index
end
if h_index > 0 then
consul_server.health_index = h_index
end
end
local function is_not_empty(value)
if value == nil or value == null
or (type(value) == "table" and not next(value))
or (type(value) == "string" and value == "")
then
return false
end
return true
end
local function watch_result_is_valid(watch_type, index, catalog_index, health_index)
if index <= 0 then
return false
end
if watch_type == watch_type_catalog then
if index == catalog_index then
return false
end
else
if index == health_index then
return false
end
end
return true
end
local function combine_sort_nodes_cmp(left, right)
if left.host ~= right.host then
return left.host < right.host
end
return left.port < right.port
end
local function port_sort_nodes_cmp(left, right)
return left.port < right.port
end
local function host_sort_nodes_cmp(left, right)
return left.host < right.host
end
function _M.connect(premature, consul_server, retry_delay)
if premature then
return
end
local catalog_thread, spawn_catalog_err = thread_spawn(watch_catalog, consul_server)
if not catalog_thread then
local random_delay = math_random(default_random_range)
log.error("failed to spawn thread watch catalog: ", spawn_catalog_err,
", retry connecting consul after ", random_delay, " seconds")
core_sleep(random_delay)
check_keepalive(consul_server, retry_delay)
return
end
local health_thread, err = thread_spawn(watch_health, consul_server)
if not health_thread then
thread_kill(catalog_thread)
local random_delay = math_random(default_random_range)
log.error("failed to spawn thread watch health: ", err, ", retry connecting consul after ",
random_delay, " seconds")
core_sleep(random_delay)
check_keepalive(consul_server, retry_delay)
return
end
local thread_wait_ok, watch_type, index = thread_wait(catalog_thread, health_thread)
thread_kill(catalog_thread)
thread_kill(health_thread)
if not thread_wait_ok then
local random_delay = math_random(default_random_range)
log.error("failed to wait thread: ", watch_type, ", retry connecting consul after ",
random_delay, " seconds")
core_sleep(random_delay)
check_keepalive(consul_server, retry_delay)
return
end
-- double check index has changed
if not watch_result_is_valid(tonumber(watch_type),
tonumber(index), consul_server.catalog_index, consul_server.health_index) then
retry_delay = get_retry_delay(retry_delay)
log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
core_sleep(retry_delay)
check_keepalive(consul_server, retry_delay)
return
end
local consul_client = resty_consul:new({
host = consul_server.host,
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
default_args = {
token = consul_server.token
}
})
local catalog_success, catalog_res, catalog_err = pcall(function()
return consul_client:get(consul_server.consul_watch_catalog_url)
end)
if not catalog_success then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_catalog_url,
", got catalog result: ", json_delay_encode(catalog_res))
check_keepalive(consul_server, retry_delay)
return
end
local catalog_error_info = (catalog_err ~= nil and catalog_err)
or ((catalog_res ~= nil and catalog_res.status ~= 200)
and catalog_res.status)
if catalog_error_info then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_catalog_url,
", got catalog result: ", json_delay_encode(catalog_res),
", with error: ", catalog_error_info)
retry_delay = get_retry_delay(retry_delay)
log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
core_sleep(retry_delay)
check_keepalive(consul_server, retry_delay)
return
end
-- get health index
local success, health_res, health_err = pcall(function()
return consul_client:get(consul_server.consul_watch_health_url)
end)
if not success then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_health_url,
", got health result: ", json_delay_encode(health_res))
check_keepalive(consul_server, retry_delay)
return
end
local health_error_info = (health_err ~= nil and health_err)
or ((health_res ~= nil and health_res.status ~= 200)
and health_res.status)
if health_error_info then
log.error("connect consul: ", consul_server.consul_server_url,
" by sub url: ", consul_server.consul_watch_health_url,
", got health result: ", json_delay_encode(health_res),
", with error: ", health_error_info)
retry_delay = get_retry_delay(retry_delay)
log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
core_sleep(retry_delay)
check_keepalive(consul_server, retry_delay)
return
end
log.info("connect consul: ", consul_server.consul_server_url,
", catalog_result status: ", catalog_res.status,
", catalog_result.headers.index: ", catalog_res.headers['X-Consul-Index'],
", consul_server.index: ", consul_server.index,
", consul_server: ", json_delay_encode(consul_server))
-- if the current index is different from the last index, then update the service
if (consul_server.catalog_index ~= tonumber(catalog_res.headers['X-Consul-Index']))
or (consul_server.health_index ~= tonumber(health_res.headers['X-Consul-Index'])) then
local up_services = core.table.new(0, #catalog_res.body)
for service_name, _ in pairs(catalog_res.body) do
-- check if the service_name is 'skip service'
if skip_service_map[service_name] then
goto CONTINUE
end
-- get node from service
local svc_url = consul_server.consul_sub_url .. "/" .. service_name
local svc_success, result, get_err = pcall(function()
return consul_client:get(svc_url, {passing = true})
end)
local error_info = (get_err ~= nil and get_err) or
((result ~= nil and result.status ~= 200) and result.status)
if not svc_success or error_info then
log.error("connect consul: ", consul_server.consul_server_url,
", by service url: ", svc_url, ", with error: ", error_info)
goto CONTINUE
end
-- decode body, decode json, update service, error handling
-- check result body is not nil and not empty
if is_not_empty(result.body) then
-- add services to table
local nodes = up_services[service_name]
local nodes_uniq = {}
for _, node in ipairs(result.body) do
if not node.Service then
goto CONTINUE
end
local svc_address, svc_port = node.Service.Address, node.Service.Port
-- Handle nil or 0 port case - default to 80 for HTTP services
if not svc_port or svc_port == 0 then
svc_port = 80
end
-- if nodes is nil, new nodes table and set to up_services
if not nodes then
nodes = core.table.new(1, 0)
up_services[service_name] = nodes
end
-- not store duplicate service IDs.
local service_id = svc_address .. ":" .. svc_port
if not nodes_uniq[service_id] then
-- add node to nodes table
core.table.insert(nodes, {
host = svc_address,
port = tonumber(svc_port),
weight = default_weight,
})
nodes_uniq[service_id] = true
end
end
if nodes then
if sort_type == "port_sort" then
core.table.sort(nodes, port_sort_nodes_cmp)
elseif sort_type == "host_sort" then
core.table.sort(nodes, host_sort_nodes_cmp)
elseif sort_type == "combine_sort" then
core.table.sort(nodes, combine_sort_nodes_cmp)
end
end
up_services[service_name] = nodes
end
:: CONTINUE ::
end
update_all_services(consul_server.consul_server_url, up_services)
--update events
local post_ok, post_err = events:post(events_list._source,
events_list.updating, all_services)
if not post_ok then
log.error("post_event failure with ", events_list._source,
", update all services error: ", post_err)
end
if dump_params then
ngx_timer_at(0, write_dump_services)
end
update_index(consul_server,
catalog_res.headers['X-Consul-Index'],
health_res.headers['X-Consul-Index'])
end
check_keepalive(consul_server, retry_delay)
end
local function format_consul_params(consul_conf)
local consul_server_list = core.table.new(0, #consul_conf.servers)
for _, v in pairs(consul_conf.servers) do
local scheme, host, port, path = unpack(http.parse_uri(nil, v))
if scheme ~= "http" then
return nil, "only support consul http schema address, eg: http://address:port"
elseif path ~= "/" or core.string.has_suffix(v, '/') then
return nil, "invalid consul server address, the valid format: http://address:port"
end
core.table.insert(consul_server_list, {
host = host,
port = port,
token = consul_conf.token,
connect_timeout = consul_conf.timeout.connect,
read_timeout = consul_conf.timeout.read,
wait_timeout = consul_conf.timeout.wait,
consul_watch_catalog_url = "/catalog/services",
consul_sub_url = "/health/service",
consul_watch_health_url = "/health/state/any",
consul_server_url = v .. "/v1",
weight = consul_conf.weight,
keepalive = consul_conf.keepalive,
health_index = 0,
catalog_index = 0,
fetch_interval = consul_conf.fetch_interval -- interval before the next consul fetch
})
end
return consul_server_list, nil
end
function _M.init_worker()
local consul_conf = local_conf.discovery.consul
if consul_conf.dump then
local dump = consul_conf.dump
dump_params = dump
if dump.load_on_init then
read_dump_services()
end
end
events = require("apisix.events")
events_list = events:event_list(
"discovery_consul_update_all_services",
"updating"
)
if 0 ~= ngx_worker_id() then
events:register(discovery_consul_callback, events_list._source, events_list.updating)
return
end
log.notice("consul_conf: ", json_delay_encode(consul_conf, true))
default_weight = consul_conf.weight
sort_type = consul_conf.sort_type
-- set default service, used when the server node cannot be found
if consul_conf.default_service then
default_service = consul_conf.default_service
default_service.weight = default_weight
end
if consul_conf.skip_services then
skip_service_map = core.table.new(0, #consul_conf.skip_services)
for _, v in ipairs(consul_conf.skip_services) do
skip_service_map[v] = true
end
end
-- set up default skip service
for _, v in ipairs(default_skip_services) do
skip_service_map[v] = true
end
local consul_servers_list, err = format_consul_params(consul_conf)
if err then
error("format consul config got error: " .. err)
end
log.info("consul_server_list: ", json_delay_encode(consul_servers_list, true))
consul_services = core.table.new(0, 1)
-- start watching each consul server
for _, server in ipairs(consul_servers_list) do
local ok, err = ngx_timer_at(0, _M.connect, server)
if not ok then
error("create consul got error: " .. err)
end
if server.keepalive == false then
ngx_timer_every(server.fetch_interval, _M.connect, server)
end
end
end
function _M.dump_data()
return {config = local_conf.discovery.consul, services = all_services }
end
function _M.control_api()
return {
{
methods = {"GET"},
uris = {"/show_dump_file"},
handler = show_dump_file,
}
}
end
return _M

View File

@@ -0,0 +1,92 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
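-- a config.yaml sketch matching this schema (values illustrative):
--
-- discovery:
--   consul:
--     servers:
--       - "http://127.0.0.1:8500"
--     fetch_interval: 3
--     dump:
--       path: "/usr/local/apisix/consul.dump"
--       expire: 0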
return {
type = "object",
properties = {
servers = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
token = {type = "string", default = ""},
fetch_interval = {type = "integer", minimum = 1, default = 3},
keepalive = {
type = "boolean",
default = true
},
weight = {type = "integer", minimum = 1, default = 1},
timeout = {
type = "object",
properties = {
connect = {type = "integer", minimum = 1, default = 2000},
read = {type = "integer", minimum = 1, default = 2000},
wait = {type = "integer", minimum = 1, default = 60}
},
default = {
connect = 2000,
read = 2000,
wait = 60,
}
},
sort_type = {
type = "string",
enum = {"origin", "host_sort", "port_sort", "combine_sort"},
default = "origin",
},
skip_services = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
dump = {
type = "object",
properties = {
path = {type = "string", minLength = 1},
load_on_init = {type = "boolean", default = true},
expire = {type = "integer", default = 0},
},
required = {"path"},
},
default_service = {
type = "object",
properties = {
host = {type = "string"},
port = {type = "integer"},
metadata = {
type = "object",
properties = {
fail_timeout = {type = "integer", default = 1},
weight = {type = "integer", default = 1},
max_fails = {type = "integer", default = 1}
},
default = {
fail_timeout = 1,
weight = 1,
max_fails = 1
}
}
}
}
},
required = {"servers"}
}

View File

@@ -0,0 +1,439 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local core_sleep = require("apisix.core.utils").sleep
local resty_consul = require('resty.consul')
local cjson = require('cjson')
local http = require('resty.http')
local util = require("apisix.cli.util")
local ipairs = ipairs
local error = error
local ngx = ngx
local unpack = unpack
local ngx_re_match = ngx.re.match
local tonumber = tonumber
local pairs = pairs
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local log = core.log
local ngx_decode_base64 = ngx.decode_base64
local json_delay_encode = core.json.delay_encode
local cjson_null = cjson.null
local applications = core.table.new(0, 5)
local default_service
local default_weight
local default_prefix_rule
local skip_keys_map = core.table.new(0, 1)
local dump_params
local events
local events_list
local consul_apps
local _M = {
version = 0.3,
}
local function discovery_consul_callback(data, event, source, pid)
applications = data
log.notice("update local variable application, event is: ", event,
"source: ", source, "server pid:", pid,
", application: ", core.json.encode(applications, true))
end
function _M.all_nodes()
return applications
end
function _M.nodes(service_name)
if not applications then
log.error("application is nil, failed to fetch nodes for : ", service_name)
return
end
local resp_list = applications[service_name]
if not resp_list then
log.error("fetch nodes failed by ", service_name, ", return default service")
return default_service and {default_service}
end
log.info("process id: ", ngx.worker.id(), ", applications[", service_name, "] = ",
json_delay_encode(resp_list, true))
return resp_list
end
local function parse_instance(node, server_name_prefix)
local key = node.Key
if key == cjson_null or not key or #key == 0 then
log.error("consul_key_empty, server_name_prefix: ", server_name_prefix,
", node: ", json_delay_encode(node, true))
return false
end
local result = ngx_re_match(key, default_prefix_rule, "jo")
if not result then
log.error("server name parse error, server_name_prefix: ", server_name_prefix,
", node: ", json_delay_encode(node, true))
return false
end
local sn, host, port = result[1], result[2], result[3]
-- skip special keys if present
if sn and skip_keys_map[sn] then
return false
end
-- the node value is base64-encoded JSON metadata, e.g.
-- base64 value = "IHsid2VpZ2h0IjogMTIwLCAibWF4X2ZhaWxzIjogMiwgImZhaWxfdGltZW91dCI6IDJ9"
-- decoded value = '{"weight": 120, "max_fails": 2, "fail_timeout": 2}'
local metadataBase64 = node.Value
if metadataBase64 == cjson_null or not metadataBase64 or #metadataBase64 == 0 then
log.error("error: consul_value_empty, server_name_prefix: ", server_name_prefix,
", node: ", json_delay_encode(node, true))
return false
end
local metadata, err = core.json.decode(ngx_decode_base64(metadataBase64))
if err then
log.error("invalid upstream value, server_name_prefix: ", server_name_prefix,
",err: ", err, ", node: ", json_delay_encode(node, true))
return false
elseif metadata.check_status == false or metadata.check_status == "false" then
log.error("server node unhealthy, server_name_prefix: ", server_name_prefix,
", node: ", json_delay_encode(node, true))
return false
end
return true, host, tonumber(port), metadata, sn
end
local function update_application(server_name_prefix, data)
local sn
local up_apps = core.table.new(0, #data)
local weight = default_weight
for _, node in ipairs(data) do
local succ, ip, port, metadata, server_name = parse_instance(node, server_name_prefix)
if succ then
sn = server_name_prefix .. server_name
local nodes = up_apps[sn]
if not nodes then
nodes = core.table.new(1, 0)
up_apps[sn] = nodes
end
core.table.insert(nodes, {
host = ip,
port = port,
weight = metadata and metadata.weight or weight,
})
end
end
-- clean old unused data
local old_apps = consul_apps[server_name_prefix] or {}
for k, _ in pairs(old_apps) do
applications[k] = nil
end
core.table.clear(old_apps)
for k, v in pairs(up_apps) do
applications[k] = v
end
consul_apps[server_name_prefix] = up_apps
log.info("update applications: ", core.json.encode(applications))
end
local function read_dump_srvs()
local data, err = util.read_file(dump_params.path)
if not data then
log.notice("read dump file get error: ", err)
return
end
log.info("read dump file: ", data)
data = util.trim(data)
if #data == 0 then
log.error("dump file is empty")
return
end
local entity, err = core.json.decode(data)
if not entity then
log.error("decoded dump data got error: ", err, ", file content: ", data)
return
end
if not entity.services or not entity.last_update then
log.warn("decoded dump data miss fields, file content: ", data)
return
end
local now_time = ngx.time()
log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ",
dump_params.expire, ", now_time: ", now_time)
if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
log.warn("dump file: ", dump_params.path, " had expired, ignored it")
return
end
applications = entity.services
log.info("load dump file into memory success")
end
local function write_dump_srvs()
local entity = {
services = applications,
last_update = ngx.time(),
expire = dump_params.expire, -- to be handled later
}
local data = core.json.encode(entity)
local succ, err = util.write_file(dump_params.path, data)
if not succ then
log.error("write dump into file got error: ", err)
end
end
local function show_dump_file()
if not dump_params then
return 503, "dump params is nil"
end
local data, err = util.read_file(dump_params.path)
if not data then
return 503, err
end
return 200, data
end
function _M.connect(premature, consul_server, retry_delay)
if premature then
return
end
local consul_client = resty_consul:new({
host = consul_server.host,
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
default_args = consul_server.default_args,
})
log.info("consul_server: ", json_delay_encode(consul_server, true))
local result, err = consul_client:get(consul_server.consul_key)
local error_info = (err ~= nil and err)
or ((result ~= nil and result.status ~= 200)
and result.status)
if error_info then
log.error("connect consul: ", consul_server.server_name_key,
" by key: ", consul_server.consul_key,
", got result: ", json_delay_encode(result, true),
", with error: ", error_info)
if not retry_delay then
retry_delay = 1
else
retry_delay = retry_delay * 4
end
log.warn("retry connecting consul after ", retry_delay, " seconds")
core_sleep(retry_delay)
goto ERR
end
log.info("connect consul: ", consul_server.server_name_key,
", result status: ", result.status,
", result.headers.index: ", result.headers['X-Consul-Index'],
", result body: ", json_delay_encode(result.body))
-- if the current index differs from the last one, update the application
if consul_server.index ~= result.headers['X-Consul-Index'] then
consul_server.index = result.headers['X-Consul-Index']
-- only long connect type use index
if consul_server.keepalive then
consul_server.default_args.index = result.headers['X-Consul-Index']
end
-- decode body, decode json, update application, error handling
if result.body and #result.body ~= 0 then
log.notice("server_name: ", consul_server.server_name_key,
", header: ", core.json.encode(result.headers, true),
", body: ", core.json.encode(result.body, true))
update_application(consul_server.server_name_key, result.body)
--update events
local ok, err = events:post(events_list._source, events_list.updating, applications)
if not ok then
log.error("post_event failure with ", events_list._source,
", update application error: ", err)
end
if dump_params then
ngx_timer_at(0, write_dump_srvs)
end
end
end
:: ERR ::
local keepalive = consul_server.keepalive
if keepalive then
local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
if not ok then
log.error("create ngx_timer_at got error: ", err)
return
end
end
end
local function format_consul_params(consul_conf)
local consul_server_list = core.table.new(0, #consul_conf.servers)
local args = {
token = consul_conf.token,
recurse = true
}
if consul_conf.keepalive then
args.wait = consul_conf.timeout.wait -- wait ~= 0 blocks; wait = 0 returns immediately
args.index = 0
end
for _, v in pairs(consul_conf.servers) do
local scheme, host, port, path = unpack(http.parse_uri(nil, v))
if scheme ~= "http" then
return nil, "only support consul http schema address, eg: http://address:port"
elseif path ~= "/" or core.string.has_suffix(v, '/') then
return nil, "invalid consul server address, the valid format: http://address:port"
end
core.table.insert(consul_server_list, {
host = host,
port = port,
connect_timeout = consul_conf.timeout.connect,
read_timeout = consul_conf.timeout.read,
consul_key = "/kv/" .. consul_conf.prefix,
server_name_key = v .. "/v1/kv/",
weight = consul_conf.weight,
keepalive = consul_conf.keepalive,
default_args = args,
index = 0,
fetch_interval = consul_conf.fetch_interval -- interval before the next consul fetch
})
end
return consul_server_list
end
function _M.init_worker()
local consul_conf = local_conf.discovery.consul_kv
if consul_conf.dump then
local dump = consul_conf.dump
dump_params = dump
if dump.load_on_init then
read_dump_srvs()
end
end
events = require("apisix.events")
events_list = events:event_list(
"discovery_consul_update_application",
"updating"
)
if 0 ~= ngx.worker.id() then
events:register(discovery_consul_callback, events_list._source, events_list.updating)
return
end
log.notice("consul_conf: ", core.json.encode(consul_conf))
default_weight = consul_conf.weight
-- set default service, used when the server node cannot be found
if consul_conf.default_service then
default_service = consul_conf.default_service
default_service.weight = default_weight
end
default_prefix_rule = "(" .. consul_conf.prefix .. "/.*/)([a-zA-Z0-9.]+):([0-9]+)"
log.info("default params, default_weight: ", default_weight,
", default_prefix_rule: ", default_prefix_rule)
if consul_conf.skip_keys then
skip_keys_map = core.table.new(0, #consul_conf.skip_keys)
for _, v in ipairs(consul_conf.skip_keys) do
skip_keys_map[v] = true
end
end
local consul_servers_list, err = format_consul_params(consul_conf)
if err then
error(err)
return
end
log.info("consul_server_list: ", core.json.encode(consul_servers_list))
consul_apps = core.table.new(0, 1)
-- start watching each consul server
for _, server in ipairs(consul_servers_list) do
local ok, err = ngx_timer_at(0, _M.connect, server)
if not ok then
error("create consul_kv got error: " .. err)
return
end
if server.keepalive == false then
ngx_timer_every(server.fetch_interval, _M.connect, server)
end
end
end
function _M.dump_data()
return {config = local_conf.discovery.consul_kv, services = applications}
end
function _M.control_api()
return {
{
methods = {"GET"},
uris = {"/show_dump_file"},
handler = show_dump_file,
}
}
end
return _M

View File

@@ -0,0 +1,88 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
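-- a config.yaml sketch matching this schema (values illustrative):
--
-- discovery:
--   consul_kv:
--     servers:
--       - "http://127.0.0.1:8500"
--     prefix: "upstreams"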
return {
type = "object",
properties = {
servers = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
token = {type = "string", default = ""},
fetch_interval = {type = "integer", minimum = 1, default = 3},
keepalive = {
type = "boolean",
default = true
},
prefix = {type = "string", default = "upstreams"},
weight = {type = "integer", minimum = 1, default = 1},
timeout = {
type = "object",
properties = {
connect = {type = "integer", minimum = 1, default = 2000},
read = {type = "integer", minimum = 1, default = 2000},
wait = {type = "integer", minimum = 1, default = 60}
},
default = {
connect = 2000,
read = 2000,
wait = 60,
}
},
skip_keys = {
type = "array",
minItems = 1,
items = {
type = "string",
}
},
dump = {
type = "object",
properties = {
path = {type = "string", minLength = 1},
load_on_init = {type = "boolean", default = true},
expire = {type = "integer", default = 0},
},
required = {"path"},
},
default_service = {
type = "object",
properties = {
host = {type = "string"},
port = {type = "integer"},
metadata = {
type = "object",
properties = {
fail_timeout = {type = "integer", default = 1},
weight = {type = "integer", default = 1},
max_fails = {type = "integer", default = 1}
},
default = {
fail_timeout = 1,
weight = 1,
max_fails = 1
}
}
}
}
},
required = {"servers"}
}

View File

@@ -0,0 +1,89 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_local = require("apisix.core.config_local")
local is_http = ngx.config.subsystem == "http"
local ipairs = ipairs
local error = error
local dns_client
local _M = {}
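-- a usage sketch (assuming the module is reachable as apisix.discovery.dns;
-- the service name and returned records are illustrative):
-- local nodes = require("apisix.discovery.dns").nodes("httpbin.default.svc:8080")
-- -- -> {{host = "10.0.0.5", port = 8080, weight = 1}, ...}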
function _M.nodes(service_name)
local host, port = core.utils.parse_addr(service_name)
core.log.info("discovery dns with host ", host, ", port ", port)
local records, err = dns_client:resolve(host, core.dns_client.RETURN_ALL)
if not records then
return nil, err
end
local nodes = core.table.new(#records, 0)
local index = 1
for _, r in ipairs(records) do
if r.address then
local node_port = port
if not node_port and r.port ~= 0 then
-- no port given in the service name; fall back to the
-- record's port unless it is zero
node_port = r.port
end
-- ignore zero port when subsystem is stream
if node_port or is_http then
nodes[index] = {host = r.address, weight = r.weight or 1, port = node_port}
if r.priority then
-- for SRV record, nodes with lower priority are chosen first
nodes[index].priority = -r.priority
end
index = index + 1
end
end
end
return nodes
end
function _M.init_worker()
local local_conf = config_local.local_conf()
local servers = local_conf.discovery.dns.servers
local resolv_conf = local_conf.discovery.dns.resolv_conf
local default_order = {"last", "SRV", "A", "AAAA", "CNAME"}
local order = core.table.try_read_attr(local_conf, "discovery", "dns", "order")
order = order or default_order
local opts = {
hosts = {},
resolvConf = resolv_conf,
nameservers = servers,
order = order,
}
local client, err = core.dns_client.new(opts)
if not client then
error("failed to init the dns client: ", err)
return
end
dns_client = client
end
return _M

View File

@@ -0,0 +1,48 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
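-- a config.yaml sketch matching this schema (values illustrative;
-- per the oneOf below, exactly one of `servers` and `resolv_conf` is given):
--
-- discovery:
--   dns:
--     servers:
--       - "127.0.0.1:8600"
--     order: ["last", "SRV", "A", "AAAA", "CNAME"]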
return {
type = "object",
properties = {
servers = {
type = "array",
minItems = 1,
items = {
type = "string",
},
},
resolv_conf = {
type = "string",
},
order = {
type = "array",
minItems = 1,
maxItems = 5,
uniqueItems = true,
items = {
enum = {"last", "SRV", "A", "AAAA", "CNAME"}
},
},
},
oneOf = {
{
required = {"servers"},
},
{
required = {"resolv_conf"},
}
}
}

View File

@@ -0,0 +1,223 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local local_conf = require("apisix.core.config_local").local_conf()
local http = require("resty.http")
local core = require("apisix.core")
local ipmatcher = require("resty.ipmatcher")
local ipairs = ipairs
local tostring = tostring
local type = type
local math_random = math.random
local ngx = ngx
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local string_sub = string.sub
local str_find = core.string.find
local log = core.log
local default_weight
local applications
local _M = {
version = 0.1,
}
local function service_info()
local host = local_conf.discovery and
local_conf.discovery.eureka and local_conf.discovery.eureka.host
if not host then
log.error("do not set eureka.host")
return
end
local basic_auth
-- TODO Add health check to get healthy nodes.
local url = host[math_random(#host)]
local auth_idx = str_find(url, "@")
if auth_idx then
local protocol_idx = str_find(url, "://")
local protocol = string_sub(url, 1, protocol_idx + 2)
local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1)
local other = string_sub(url, auth_idx + 1)
url = protocol .. other
basic_auth = "Basic " .. ngx.encode_base64(user_and_password)
end
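-- e.g. "http://user:pass@127.0.0.1:8761" (illustrative) becomes
-- url = "http://127.0.0.1:8761" with basic_auth = "Basic " .. base64("user:pass")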
if local_conf.discovery.eureka.prefix then
url = url .. local_conf.discovery.eureka.prefix
end
if string_sub(url, #url) ~= "/" then
url = url .. "/"
end
return url, basic_auth
end
local function request(request_uri, basic_auth, method, path, query, body)
log.info("eureka uri:", request_uri, ".")
local url = request_uri .. path
local headers = core.table.new(0, 5)
headers['Connection'] = 'Keep-Alive'
headers['Accept'] = 'application/json'
if basic_auth then
headers['Authorization'] = basic_auth
end
if body and 'table' == type(body) then
local err
body, err = core.json.encode(body)
if not body then
return nil, 'invalid body: ' .. err
end
-- log.warn(method, url, body)
headers['Content-Type'] = 'application/json'
end
local httpc = http.new()
local timeout = local_conf.discovery.eureka.timeout
local connect_timeout = timeout and timeout.connect or 2000
local send_timeout = timeout and timeout.send or 2000
local read_timeout = timeout and timeout.read or 5000
log.info("connect_timeout:", connect_timeout, ", send_timeout:", send_timeout,
", read_timeout:", read_timeout, ".")
httpc:set_timeouts(connect_timeout, send_timeout, read_timeout)
return httpc:request_uri(url, {
version = 1.1,
method = method,
headers = headers,
query = query,
body = body,
ssl_verify = false,
})
end
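-- Extract (ip, port, metadata) from a eureka instance record. Only instances
-- whose (possibly overridden) status is "UP" are returned; the secure port
-- takes precedence over the plain port when both are enabled, and the IP
-- must be a valid IPv4 or IPv6 address.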
local function parse_instance(instance)
local status = instance.status
local overridden_status = instance.overriddenstatus or instance.overriddenStatus
if overridden_status and overridden_status ~= "UNKNOWN" then
status = overridden_status
end
if status ~= "UP" then
return
end
local port
if tostring(instance.port["@enabled"]) == "true" and instance.port["$"] then
port = instance.port["$"]
-- secure = false
end
if tostring(instance.securePort["@enabled"]) == "true" and instance.securePort["$"] then
port = instance.securePort["$"]
-- secure = true
end
local ip = instance.ipAddr
if not ipmatcher.parse_ipv4(ip) and
not ipmatcher.parse_ipv6(ip) then
log.error(instance.app, " service ", instance.hostName, " node IP ", ip,
" is invalid(must be IPv4 or IPv6).")
return
end
return ip, port, instance.metadata
end
local function fetch_full_registry(premature)
if premature then
return
end
local request_uri, basic_auth = service_info()
if not request_uri then
return
end
local res, err = request(request_uri, basic_auth, "GET", "apps")
if not res then
log.error("failed to fetch registry", err)
return
end
if not res.body or res.status ~= 200 then
log.error("failed to fetch registry, status = ", res.status)
return
end
local json_str = res.body
local data, err = core.json.decode(json_str)
if not data then
log.error("invalid response body: ", json_str, " err: ", err)
return
end
local apps = data.applications.application
local up_apps = core.table.new(0, #apps)
for _, app in ipairs(apps) do
for _, instance in ipairs(app.instance) do
local ip, port, metadata = parse_instance(instance)
if ip and port then
local nodes = up_apps[app.name]
if not nodes then
nodes = core.table.new(#app.instance, 0)
up_apps[app.name] = nodes
end
core.table.insert(nodes, {
host = ip,
port = port,
weight = metadata and metadata.weight or default_weight,
metadata = metadata,
})
if metadata then
-- remove useless data
metadata.weight = nil
end
end
end
end
applications = up_apps
end
function _M.nodes(service_name)
if not applications then
log.error("failed to fetch nodes for : ", service_name)
return
end
return applications[service_name]
end
function _M.init_worker()
default_weight = local_conf.discovery.eureka.weight or 100
log.info("default_weight:", default_weight, ".")
local fetch_interval = local_conf.discovery.eureka.fetch_interval or 30
log.info("fetch_interval:", fetch_interval, ".")
ngx_timer_at(0, fetch_full_registry)
ngx_timer_every(fetch_interval, fetch_full_registry)
end
function _M.dump_data()
return {config = local_conf.discovery.eureka, services = applications or {}}
end
return _M

View File

@@ -0,0 +1,40 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
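-- Validates the `discovery.eureka` section of the local config, e.g.
-- (illustrative values):
--
--   discovery:
--     eureka:
--       host:
--         - "http://127.0.0.1:8761"
--       prefix: "/eureka/"
--       fetch_interval: 30
--       weight: 100
--       timeout:
--         connect: 2000
--         send: 2000
--         read: 5000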
return {
type = "object",
properties = {
host = {
type = "array",
minItems = 1,
items = {
type = "string",
},
},
fetch_interval = {type = "integer", minimum = 1, default = 30},
prefix = {type = "string"},
weight = {type = "integer", minimum = 0},
timeout = {
type = "object",
properties = {
connect = {type = "integer", minimum = 1, default = 2000},
send = {type = "integer", minimum = 1, default = 2000},
read = {type = "integer", minimum = 1, default = 5000},
}
},
},
required = {"host"}
}

View File

@@ -0,0 +1,43 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local log = require("apisix.core.log")
local local_conf = require("apisix.core.config_local").local_conf()
local pairs = pairs
local discovery_type = local_conf.discovery
local discovery = {}
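-- Each key under the `discovery` section of the local config names a
-- discovery backend; the matching "apisix.discovery.<name>" module is loaded
-- here, so e.g. a `discovery: eureka: ...` section pulls in
-- apisix.discovery.eureka.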
if discovery_type then
for discovery_name, _ in pairs(discovery_type) do
log.info("use discovery: ", discovery_name)
discovery[discovery_name] = require("apisix.discovery." .. discovery_name)
end
end
function discovery.init_worker()
if discovery_type then
for discovery_name, _ in pairs(discovery_type) do
discovery[discovery_name].init_worker()
end
end
end
return {
version = 0.1,
discovery = discovery
}

View File

@@ -0,0 +1,377 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local ipairs = ipairs
local string = string
local math = math
local type = type
local core = require("apisix.core")
local http = require("resty.http")
local function list_query(informer)
local arguments = {
limit = informer.limit,
}
if informer.continue and informer.continue ~= "" then
arguments.continue = informer.continue
end
if informer.label_selector and informer.label_selector ~= "" then
arguments.labelSelector = informer.label_selector
end
if informer.field_selector and informer.field_selector ~= "" then
arguments.fieldSelector = informer.field_selector
end
return ngx.encode_args(arguments)
end
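-- Perform a (possibly paginated) LIST call against the apiserver. When the
-- response carries a non-empty `metadata.continue` token, the remaining
-- pages are fetched recursively with the same limit until the token is
-- exhausted.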
local function list(httpc, apiserver, informer)
local response, err = httpc:request({
path = informer.path,
query = list_query(informer),
headers = {
["Host"] = apiserver.host .. ":" .. apiserver.port,
["Authorization"] = "Bearer " .. apiserver.token,
["Accept"] = "application/json",
["Connection"] = "keep-alive"
}
})
core.log.info("--raw=", informer.path, "?", list_query(informer))
if not response then
return false, "RequestError", err or ""
end
if response.status ~= 200 then
return false, response.reason, response:read_body() or ""
end
local body, err = response:read_body()
if err then
return false, "ReadBodyError", err
end
local data = core.json.decode(body)
if not data or data.kind ~= informer.list_kind then
return false, "UnexpectedBody", body
end
informer.version = data.metadata.resourceVersion
if informer.on_added then
for _, item in ipairs(data.items or {}) do
informer:on_added(item, "list")
end
end
informer.continue = data.metadata.continue
if informer.continue and informer.continue ~= "" then
list(httpc, apiserver, informer)
end
return true
end
local function watch_query(informer)
local arguments = {
watch = "true",
allowWatchBookmarks = "true",
timeoutSeconds = informer.overtime,
}
if informer.version and informer.version ~= "" then
arguments.resourceVersion = informer.version
end
if informer.label_selector and informer.label_selector ~= "" then
arguments.labelSelector = informer.label_selector
end
if informer.field_selector and informer.field_selector ~= "" then
arguments.fieldSelector = informer.field_selector
end
return ngx.encode_args(arguments)
end
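-- A WATCH response body arrives in chunks of newline-delimited JSON events.
-- Each complete "{\"type\":...}\n" event found in `body` is passed to
-- `callback`; any trailing partial event is returned as the remainder so
-- the caller can prepend it to the next chunk.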
local function split_event (body, callback, ...)
local gmatch_iterator, err = ngx.re.gmatch(body, "{\"type\":.*}\n", "jao")
if not gmatch_iterator then
return false, nil, "GmatchError", err
end
local captures
local captured_size = 0
local ok, reason
while true do
captures, err = gmatch_iterator()
if err then
return false, nil, "GmatchError", err
end
if not captures then
break
end
captured_size = captured_size + #captures[0]
ok, reason, err = callback(captures[0], ...)
if not ok then
return false, nil, reason, err
end
end
local remainder_body
if captured_size == #body then
remainder_body = ""
elseif captured_size == 0 then
remainder_body = body
elseif captured_size < #body then
remainder_body = string.sub(body, captured_size + 1)
end
return true, remainder_body
end
local function dispatch_event(event_string, informer)
local event = core.json.decode(event_string)
if not event or not event.type or not event.object then
return false, "UnexpectedBody", event_string
end
local tp = event.type
if tp == "ERROR" then
if event.object.code == 410 then
return false, "ResourceGone", nil
end
return false, "UnexpectedBody", event_string
end
local object = event.object
informer.version = object.metadata.resourceVersion
if tp == "ADDED" then
if informer.on_added then
informer:on_added(object, "watch")
end
elseif tp == "DELETED" then
if informer.on_deleted then
informer:on_deleted(object)
end
elseif tp == "MODIFIED" then
if informer.on_modified then
informer:on_modified(object)
end
-- elseif tp == "BOOKMARK" then
--     do nothing
end
return true
end
local function watch(httpc, apiserver, informer)
local watch_times = 8
for _ = 1, watch_times do
local watch_seconds = 1800 + math.random(9, 999)
informer.overtime = watch_seconds
local http_seconds = watch_seconds + 120
httpc:set_timeouts(2000, 3000, http_seconds * 1000)
local response, err = httpc:request({
path = informer.path,
query = watch_query(informer),
headers = {
["Host"] = apiserver.host .. ":" .. apiserver.port,
["Authorization"] = "Bearer " .. apiserver.token,
["Accept"] = "application/json",
["Connection"] = "keep-alive"
}
})
core.log.info("--raw=", informer.path, "?", watch_query(informer))
if err then
return false, "RequestError", err
end
if response.status ~= 200 then
return false, response.reason, response:read_body() or ""
end
local ok
local remainder_body
local body
local reason
while true do
body, err = response.body_reader()
if err then
return false, "ReadBodyError", err
end
if not body then
break
end
if remainder_body and #remainder_body > 0 then
body = remainder_body .. body
end
ok, remainder_body, reason, err = split_event(body, dispatch_event, informer)
if not ok then
if reason == "ResourceGone" then
return true
end
return false, reason, err
end
end
end
return true
end
local function list_watch(informer, apiserver)
local ok
local reason, message
local httpc = http.new()
informer.continue = ""
informer.version = ""
informer.fetch_state = "connecting"
core.log.info("begin to connect ", apiserver.host, ":", apiserver.port)
ok, message = httpc:connect({
scheme = apiserver.schema,
host = apiserver.host,
port = apiserver.port,
ssl_verify = false
})
if not ok then
informer.fetch_state = "connect failed"
core.log.error("connect apiserver failed, apiserver.host: ", apiserver.host,
", apiserver.port: ", apiserver.port, ", message : ", message)
return false
end
core.log.info("begin to list ", informer.kind)
informer.fetch_state = "listing"
if informer.pre_list then
informer:pre_list()
end
ok, reason, message = list(httpc, apiserver, informer)
if not ok then
informer.fetch_state = "list failed"
core.log.error("list failed, kind: ", informer.kind,
", reason: ", reason, ", message : ", message)
return false
end
informer.fetch_state = "list finished"
if informer.post_list then
informer:post_list()
end
core.log.info("begin to watch ", informer.kind)
informer.fetch_state = "watching"
ok, reason, message = watch(httpc, apiserver, informer)
if not ok then
informer.fetch_state = "watch failed"
core.log.error("watch failed, kind: ", informer.kind,
", reason: ", reason, ", message : ", message)
return false
end
informer.fetch_state = "watch finished"
return true
end
local _M = {
}
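-- Build an informer for the given group/version/kind. The request path
-- follows the Kubernetes API conventions, e.g.:
--   new("", "v1", "Endpoints", "endpoints", nil)
--     -> /api/v1/endpoints
--   new("discovery.k8s.io", "v1", "EndpointSlice", "endpointslices", "ns1")
--     -> /apis/discovery.k8s.io/v1/namespaces/ns1/endpointslices
-- (the namespace "ns1" above is illustrative)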
function _M.new(group, version, kind, plural, namespace)
local tp
tp = type(group)
if tp ~= "nil" and tp ~= "string" then
return nil, "group should set to string or nil type but " .. tp
end
tp = type(namespace)
if tp ~= "nil" and tp ~= "string" then
return nil, "namespace should set to string or nil type but " .. tp
end
tp = type(version)
if tp ~= "string" or version == "" then
return nil, "version should set to non-empty string"
end
tp = type(kind)
if tp ~= "string" or kind == "" then
return nil, "kind should set to non-empty string"
end
tp = type(plural)
if tp ~= "string" or plural == "" then
return nil, "plural should set to non-empty string"
end
local path = ""
if group == nil or group == "" then
path = path .. "/api/" .. version
else
path = path .. "/apis/" .. group .. "/" .. version
end
if namespace and namespace ~= "" then
path = path .. "/namespaces/" .. namespace
end
path = path .. "/" .. plural
return {
kind = kind,
list_kind = kind .. "List",
plural = plural,
path = path,
limit = 120,
label_selector = "",
field_selector = "",
overtime = "1800",
version = "",
continue = "",
list_watch = list_watch
}
end
return _M

View File

@@ -0,0 +1,694 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local ipairs = ipairs
local pairs = pairs
local string = string
local tonumber = tonumber
local tostring = tostring
local os = os
local error = error
local pcall = pcall
local setmetatable = setmetatable
local is_http = ngx.config.subsystem == "http"
local process = require("ngx.process")
local core = require("apisix.core")
local util = require("apisix.cli.util")
local local_conf = require("apisix.core.config_local").local_conf()
local informer_factory = require("apisix.discovery.kubernetes.informer_factory")
local ctx
local endpoint_lrucache = core.lrucache.new({
ttl = 300,
count = 1024
})
local endpoint_buffer = {}
local function sort_nodes_cmp(left, right)
if left.host ~= right.host then
return left.host < right.host
end
return left.port < right.port
end
local function on_endpoint_slices_modified(handle, endpoint)
if handle.namespace_selector and
not handle:namespace_selector(endpoint.metadata.namespace) then
return
end
core.log.debug(core.json.delay_encode(endpoint))
core.table.clear(endpoint_buffer)
local endpointslices = endpoint.endpoints
for _, endpointslice in ipairs(endpointslices or {}) do
if endpointslice.addresses then
local addresses = endpointslice.addresses
for _, port in ipairs(endpoint.ports or {}) do
local port_name
if port.name then
port_name = port.name
elseif port.targetPort then
port_name = tostring(port.targetPort)
else
port_name = tostring(port.port)
end
if endpointslice.conditions and endpointslice.conditions.ready then
local nodes = endpoint_buffer[port_name]
if nodes == nil then
nodes = core.table.new(#endpointslices * #addresses, 0)
endpoint_buffer[port_name] = nodes
end
for _, address in ipairs(addresses) do
core.table.insert(nodes, {
host = address.ip,
port = port.port,
weight = handle.default_weight
})
end
end
end
end
end
-- sort each port's node list so the encoded content (and the derived
-- version) is stable across fetches
for _, nodes in pairs(endpoint_buffer) do
core.table.sort(nodes, sort_nodes_cmp)
end
local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
local endpoint_content = core.json.encode(endpoint_buffer, true)
local endpoint_version = ngx.crc32_long(endpoint_content)
local _, err
_, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version)
if err then
core.log.error("set endpoint version into discovery DICT failed, ", err)
return
end
_, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content)
if err then
core.log.error("set endpoint into discovery DICT failed, ", err)
handle.endpoint_dict:delete(endpoint_key .. "#version")
end
end
local function on_endpoint_modified(handle, endpoint)
if handle.namespace_selector and
not handle:namespace_selector(endpoint.metadata.namespace) then
return
end
core.log.debug(core.json.delay_encode(endpoint))
core.table.clear(endpoint_buffer)
local subsets = endpoint.subsets
for _, subset in ipairs(subsets or {}) do
if subset.addresses then
local addresses = subset.addresses
for _, port in ipairs(subset.ports or {}) do
local port_name
if port.name then
port_name = port.name
elseif port.targetPort then
port_name = tostring(port.targetPort)
else
port_name = tostring(port.port)
end
local nodes = endpoint_buffer[port_name]
if nodes == nil then
nodes = core.table.new(#subsets * #addresses, 0)
endpoint_buffer[port_name] = nodes
end
for _, address in ipairs(subset.addresses) do
core.table.insert(nodes, {
host = address.ip,
port = port.port,
weight = handle.default_weight
})
end
end
end
end
-- sort each port's node list so the encoded content (and the derived
-- version) is stable across fetches
for _, nodes in pairs(endpoint_buffer) do
core.table.sort(nodes, sort_nodes_cmp)
end
local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
local endpoint_content = core.json.encode(endpoint_buffer, true)
local endpoint_version = ngx.crc32_long(endpoint_content)
local _, err
_, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version)
if err then
core.log.error("set endpoint version into discovery DICT failed, ", err)
return
end
_, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content)
if err then
core.log.error("set endpoint into discovery DICT failed, ", err)
handle.endpoint_dict:delete(endpoint_key .. "#version")
end
end
local function on_endpoint_deleted(handle, endpoint)
if handle.namespace_selector and
not handle:namespace_selector(endpoint.metadata.namespace) then
return
end
core.log.debug(core.json.delay_encode(endpoint))
local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
handle.endpoint_dict:delete(endpoint_key .. "#version")
handle.endpoint_dict:delete(endpoint_key)
end
local function pre_list(handle)
handle.endpoint_dict:flush_all()
end
local function post_list(handle)
handle.endpoint_dict:flush_expired()
end
local function setup_label_selector(conf, informer)
informer.label_selector = conf.label_selector
end
local function setup_namespace_selector(conf, informer)
local ns = conf.namespace_selector
if ns == nil then
informer.namespace_selector = nil
return
end
if ns.equal then
informer.field_selector = "metadata.namespace=" .. ns.equal
informer.namespace_selector = nil
return
end
if ns.not_equal then
informer.field_selector = "metadata.namespace!=" .. ns.not_equal
informer.namespace_selector = nil
return
end
if ns.match then
informer.namespace_selector = function(self, namespace)
local match = conf.namespace_selector.match
local m, err
for _, v in ipairs(match) do
m, err = ngx.re.match(namespace, v, "jo")
if m and m[0] == namespace then
return true
end
if err then
core.log.error("ngx.re.match failed: ", err)
end
end
return false
end
return
end
if ns.not_match then
informer.namespace_selector = function(self, namespace)
local not_match = conf.namespace_selector.not_match
local m, err
for _, v in ipairs(not_match) do
m, err = ngx.re.match(namespace, v, "jo")
if m and m[0] == namespace then
return false
end
if err then
return false
end
end
return true
end
return
end
return
end
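-- Resolve "${ENV_NAME}"-style references against the process environment;
-- any other value is returned unchanged. This lets config entries such as
-- "${KUBERNETES_SERVICE_HOST}" be filled in at runtime.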
local function read_env(key)
if #key > 3 then
local first, second = string.byte(key, 1, 2)
if first == string.byte('$') and second == string.byte('{') then
local last = string.byte(key, #key)
if last == string.byte('}') then
local env = string.sub(key, 3, #key - 1)
local value = os.getenv(env)
if not value then
return nil, "not found environment variable " .. env
end
return value
end
end
end
return key
end
local function read_token(token_file)
local token, err = util.read_file(token_file)
if err then
return nil, err
end
-- remove possible extra whitespace
return util.trim(token)
end
local function get_apiserver(conf)
local apiserver = {
schema = "",
host = "",
port = "",
}
apiserver.schema = conf.service.schema
if apiserver.schema ~= "http" and apiserver.schema ~= "https" then
return nil, "service.schema should set to one of [http,https] but " .. apiserver.schema
end
local err
apiserver.host, err = read_env(conf.service.host)
if err then
return nil, err
end
if apiserver.host == "" then
return nil, "service.host should set to non-empty string"
end
local port
port, err = read_env(conf.service.port)
if err then
return nil, err
end
apiserver.port = tonumber(port)
if not apiserver.port or apiserver.port <= 0 or apiserver.port > 65535 then
return nil, "invalid port value: " .. apiserver.port
end
if conf.client.token then
local token, err = read_env(conf.client.token)
if err then
return nil, err
end
apiserver.token = util.trim(token)
elseif conf.client.token_file and conf.client.token_file ~= "" then
setmetatable(apiserver, {
__index = function(_, key)
if key ~= "token" then
return
end
local token_file, err = read_env(conf.client.token_file)
if err then
core.log.error("failed to read token file path: ", err)
return
end
local token, err = read_token(token_file)
if err then
core.log.error("failed to read token from file: ", err)
return
end
core.log.debug("re-read the token value")
return token
end
})
else
return nil, "one of [client.token,client.token_file] should be set but none"
end
if apiserver.schema == "https" and apiserver.token == "" then
return nil, "apiserver.token should set to non-empty string when service.schema is https"
end
return apiserver
end
local function create_endpoint_lrucache(endpoint_dict, endpoint_key, endpoint_port)
local endpoint_content = endpoint_dict:get_stale(endpoint_key)
if not endpoint_content then
core.log.error("get empty endpoint content from discovery DIC, this should not happen ",
endpoint_key)
return nil
end
local endpoint = core.json.decode(endpoint_content)
if not endpoint then
core.log.error("decode endpoint content failed, this should not happen, content: ",
endpoint_content)
return nil
end
return endpoint[endpoint_port]
end
local _M = {
version = "0.0.1"
}
local function start_fetch(handle)
local timer_runner
timer_runner = function(premature)
if premature then
return
end
local ok, status = pcall(handle.list_watch, handle, handle.apiserver)
local retry_interval = 0
if not ok then
core.log.error("list_watch failed, kind: ", handle.kind,
", reason: ", "RuntimeException", ", message : ", status)
retry_interval = 40
elseif not status then
retry_interval = 40
end
ngx.timer.at(retry_interval, timer_runner)
end
ngx.timer.at(0, timer_runner)
end
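-- Map a discovery instance to its lua_shared_dict: "kubernetes" in single
-- mode, "kubernetes-<id>" in multiple mode, with a "-stream" suffix when
-- running in the stream subsystem.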
local function get_endpoint_dict(id)
local shm = "kubernetes"
if id and #id > 0 then
shm = shm .. "-" .. id
end
if not is_http then
shm = shm .. "-stream"
end
return ngx.shared[shm]
end
local function single_mode_init(conf)
local endpoint_dict = get_endpoint_dict()
if not endpoint_dict then
error("failed to get lua_shared_dict: ngx.shared.kubernetes, " ..
"please check your APISIX version")
end
if process.type() ~= "privileged agent" then
ctx = endpoint_dict
return
end
local apiserver, err = get_apiserver(conf)
if err then
error(err)
return
end
local default_weight = conf.default_weight
local endpoints_informer, err
if conf.watch_endpoint_slices_schema then
endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1",
"EndpointSlice", "endpointslices", "")
else
endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "")
end
if err then
error(err)
return
end
setup_namespace_selector(conf, endpoints_informer)
setup_label_selector(conf, endpoints_informer)
if conf.watch_endpoint_slices_schema then
endpoints_informer.on_added = on_endpoint_slices_modified
endpoints_informer.on_modified = on_endpoint_slices_modified
else
endpoints_informer.on_added = on_endpoint_modified
endpoints_informer.on_modified = on_endpoint_modified
end
endpoints_informer.on_deleted = on_endpoint_deleted
endpoints_informer.pre_list = pre_list
endpoints_informer.post_list = post_list
ctx = setmetatable({
endpoint_dict = endpoint_dict,
apiserver = apiserver,
default_weight = default_weight
}, { __index = endpoints_informer })
start_fetch(ctx)
end
local function single_mode_nodes(service_name)
local pattern = "^(.*):(.*)$" -- namespace/name:port_name
local match = ngx.re.match(service_name, pattern, "jo")
if not match then
core.log.error("get unexpected upstream service_name: ", service_name)
return nil
end
local endpoint_dict = ctx
local endpoint_key = match[1]
local endpoint_port = match[2]
local endpoint_version = endpoint_dict:get_stale(endpoint_key .. "#version")
if not endpoint_version then
core.log.info("get empty endpoint version from discovery DICT ", endpoint_key)
return nil
end
return endpoint_lrucache(service_name, endpoint_version,
create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port)
end
local function multiple_mode_worker_init(confs)
for _, conf in ipairs(confs) do
local id = conf.id
if ctx[id] then
error("duplicate id value")
end
local endpoint_dict = get_endpoint_dict(id)
if not endpoint_dict then
error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) ..
"please check your APISIX version")
end
ctx[id] = endpoint_dict
end
end
local function multiple_mode_init(confs)
ctx = core.table.new(#confs, 0)
if process.type() ~= "privileged agent" then
multiple_mode_worker_init(confs)
return
end
for _, conf in ipairs(confs) do
local id = conf.id
if ctx[id] then
error("duplicate id value")
end
local endpoint_dict = get_endpoint_dict(id)
if not endpoint_dict then
error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) ..
"please check your APISIX version")
end
local apiserver, err = get_apiserver(conf)
if err then
error(err)
return
end
local default_weight = conf.default_weight
local endpoints_informer, err
if conf.watch_endpoint_slices_schema then
endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1",
"EndpointSlice", "endpointslices", "")
else
endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "")
end
if err then
error(err)
return
end
setup_namespace_selector(conf, endpoints_informer)
setup_label_selector(conf, endpoints_informer)
if conf.watch_endpoint_slices_schema then
endpoints_informer.on_added = on_endpoint_slices_modified
endpoints_informer.on_modified = on_endpoint_slices_modified
else
endpoints_informer.on_added = on_endpoint_modified
endpoints_informer.on_modified = on_endpoint_modified
end
endpoints_informer.on_deleted = on_endpoint_deleted
endpoints_informer.pre_list = pre_list
endpoints_informer.post_list = post_list
ctx[id] = setmetatable({
endpoint_dict = endpoint_dict,
apiserver = apiserver,
default_weight = default_weight
}, { __index = endpoints_informer })
end
for _, item in pairs(ctx) do
start_fetch(item)
end
end
local function multiple_mode_nodes(service_name)
local pattern = "^(.*)/(.*/.*):(.*)$" -- id/namespace/name:port_name
local match = ngx.re.match(service_name, pattern, "jo")
if not match then
core.log.error("get unexpected upstream service_name: ", service_name)
return nil
end
local id = match[1]
local endpoint_dict = ctx[id]
if not endpoint_dict then
core.log.error("id not exist")
return nil
end
local endpoint_key = match[2]
local endpoint_port = match[3]
local endpoint_version = endpoint_dict:get_stale(endpoint_key .. "#version")
if not endpoint_version then
core.log.info("get empty endpoint version from discovery DICT ", endpoint_key)
return nil
end
return endpoint_lrucache(service_name, endpoint_version,
create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port)
end
function _M.init_worker()
local discovery_conf = local_conf.discovery.kubernetes
core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf))
if #discovery_conf == 0 then
_M.nodes = single_mode_nodes
single_mode_init(discovery_conf)
else
_M.nodes = multiple_mode_nodes
multiple_mode_init(discovery_conf)
end
end
local function dump_endpoints_from_dict(endpoint_dict)
local keys, err = endpoint_dict:get_keys(0)
if err then
core.log.error("get keys from discovery dict failed: ", err)
return
end
if not keys or #keys == 0 then
return
end
local endpoints = {}
for i = 1, #keys do
local key = keys[i]
-- skip key with suffix #version
if key:sub(-#"#version") ~= "#version" then
local value = endpoint_dict:get(key)
core.table.insert(endpoints, {
name = key,
value = value
})
end
end
return endpoints
end
function _M.dump_data()
local discovery_conf = local_conf.discovery.kubernetes
local eps = {}
if #discovery_conf == 0 then
-- Single mode: discovery_conf is a single configuration object
local endpoint_dict = get_endpoint_dict()
local endpoints = dump_endpoints_from_dict(endpoint_dict)
if endpoints then
core.table.insert(eps, {
endpoints = endpoints
})
end
else
-- Multiple mode: discovery_conf is an array of configuration objects
for _, conf in ipairs(discovery_conf) do
local endpoint_dict = get_endpoint_dict(conf.id)
local endpoints = dump_endpoints_from_dict(endpoint_dict)
if endpoints then
core.table.insert(eps, {
id = conf.id,
endpoints = endpoints
})
end
end
end
return {config = discovery_conf, endpoints = eps}
end
return _M

View File

@@ -0,0 +1,217 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
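-- Two configuration shapes are accepted: a single object describing one
-- cluster (with in-cluster defaults such as the service-account token file),
-- or an array of such objects for multiple clusters, where each entry must
-- carry a unique `id` plus explicit `service` and `client` settings.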
local host_patterns = {
{ pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
{ pattern = [[^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$]] },
}
local port_patterns = {
{ pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
{ pattern = [[^(([1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5]))$]] },
}
local schema_schema = {
type = "string",
enum = { "http", "https" },
default = "https",
}
local token_patterns = {
{ pattern = [[\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
{ pattern = [[^[A-Za-z0-9+\/._=-]{0,4096}$]] },
}
local token_schema = {
type = "string",
oneOf = token_patterns,
}
local token_file_schema = {
type = "string",
pattern = [[^[^\:*?"<>|]*$]],
minLength = 1,
maxLength = 500,
}
local namespace_pattern = [[^[a-z0-9]([-a-z0-9_.]*[a-z0-9])?$]]
local namespace_regex_pattern = [[^[\x21-\x7e]*$]]
local namespace_selector_schema = {
type = "object",
properties = {
equal = {
type = "string",
pattern = namespace_pattern,
},
not_equal = {
type = "string",
pattern = namespace_pattern,
},
match = {
type = "array",
items = {
type = "string",
pattern = namespace_regex_pattern
},
minItems = 1
},
not_match = {
type = "array",
items = {
type = "string",
pattern = namespace_regex_pattern
},
minItems = 1
},
},
oneOf = {
{ required = {} },
{ required = { "equal" } },
{ required = { "not_equal" } },
{ required = { "match" } },
{ required = { "not_match" } }
},
}
local label_selector_schema = {
type = "string",
}
local default_weight_schema = {
type = "integer",
default = 50,
minimum = 0,
}
local shared_size_schema = {
type = "string",
pattern = [[^[1-9][0-9]*m$]],
default = "1m",
}
local watch_endpoint_slices_schema = {
type = "boolean",
default = false,
}
return {
anyOf = {
{
type = "object",
properties = {
service = {
type = "object",
properties = {
schema = schema_schema,
host = {
type = "string",
oneOf = host_patterns,
default = "${KUBERNETES_SERVICE_HOST}",
},
port = {
type = "string",
oneOf = port_patterns,
default = "${KUBERNETES_SERVICE_PORT}",
},
},
default = {
schema = "https",
host = "${KUBERNETES_SERVICE_HOST}",
port = "${KUBERNETES_SERVICE_PORT}",
}
},
client = {
type = "object",
properties = {
token = token_schema,
token_file = token_file_schema,
},
default = {
token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
},
["if"] = {
["not"] = {
anyOf = {
{ required = { "token" } },
{ required = { "token_file" } },
}
}
},
["then"] = {
properties = {
token_file = {
default = "/var/run/secrets/kubernetes.io/serviceaccount/token"
}
}
}
},
namespace_selector = namespace_selector_schema,
label_selector = label_selector_schema,
default_weight = default_weight_schema,
shared_size = shared_size_schema,
watch_endpoint_slices = watch_endpoint_slices_schema,
},
},
{
type = "array",
minItems = 1,
items = {
type = "object",
properties = {
id = {
type = "string",
pattern = [[^[a-z0-9]{1,8}$]]
},
service = {
type = "object",
properties = {
schema = schema_schema,
host = {
type = "string",
oneOf = host_patterns,
},
port = {
type = "string",
oneOf = port_patterns,
},
},
required = { "host", "port" }
},
client = {
type = "object",
properties = {
token = token_schema,
token_file = token_file_schema,
},
oneOf = {
{ required = { "token" } },
{ required = { "token_file" } },
},
},
namespace_selector = namespace_selector_schema,
label_selector = label_selector_schema,
default_weight = default_weight_schema,
shared_size = shared_size_schema,
watch_endpoint_slices = watch_endpoint_slices_schema,
},
required = { "id", "service", "client" }
},
}
}
}

View File

@@ -0,0 +1,392 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local local_conf = require('apisix.core.config_local').local_conf()
local http = require('resty.http')
local core = require('apisix.core')
local ipairs = ipairs
local pairs = pairs
local type = type
local math = math
local math_random = math.random
local ngx = ngx
local ngx_re = require('ngx.re')
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local string = string
local string_sub = string.sub
local str_byte = string.byte
local str_find = core.string.find
local log = core.log
local default_weight
local nacos_dict = ngx.shared.nacos --key: namespace_id.group_name.service_name
if not nacos_dict then
error("lua_shared_dict \"nacos\" not configured")
end
local auth_path = 'auth/login'
local instance_list_path = 'ns/instance/list?healthyOnly=true&serviceName='
local default_namespace_id = "public"
local default_group_name = "DEFAULT_GROUP"
local access_key
local secret_key
local _M = {}
local function get_key(namespace_id, group_name, service_name)
return namespace_id .. '.' .. group_name .. '.' .. service_name
end
local function request(request_uri, path, body, method, basic_auth)
local url = request_uri .. path
log.info('request url:', url)
local headers = {}
headers['Accept'] = 'application/json'
if basic_auth then
headers['Authorization'] = basic_auth
end
if body and 'table' == type(body) then
local err
body, err = core.json.encode(body)
if not body then
return nil, 'invalid body : ' .. err
end
headers['Content-Type'] = 'application/json'
end
local httpc = http.new()
local timeout = local_conf.discovery.nacos.timeout
local connect_timeout = timeout.connect
local send_timeout = timeout.send
local read_timeout = timeout.read
log.info('connect_timeout:', connect_timeout, ', send_timeout:', send_timeout,
', read_timeout:', read_timeout)
httpc:set_timeouts(connect_timeout, send_timeout, read_timeout)
local res, err = httpc:request_uri(url, {
method = method,
headers = headers,
body = body,
ssl_verify = true,
})
if not res then
return nil, err
end
if not res.body or res.status ~= 200 then
return nil, 'status = ' .. res.status
end
local json_str = res.body
local data, err = core.json.decode(json_str)
if not data then
return nil, err
end
return data
end
local function get_url(request_uri, path)
return request(request_uri, path, nil, 'GET', nil)
end
local function post_url(request_uri, path, body)
return request(request_uri, path, body, 'POST', nil)
end
local function get_token_param(base_uri, username, password)
if not username or not password then
return ''
end
local args = { username = username, password = password}
local data, err = post_url(base_uri, auth_path .. '?' .. ngx.encode_args(args), nil)
if err then
log.error('nacos login fail, username: ', username, ', desc: ', err)
return nil, err
end
return '&accessToken=' .. data.accessToken
end
local function get_namespace_param(namespace_id)
local param = ''
if namespace_id then
local args = {namespaceId = namespace_id}
param = '&' .. ngx.encode_args(args)
end
return param
end
local function get_group_name_param(group_name)
local param = ''
if group_name then
local args = {groupName = group_name}
param = '&' .. ngx.encode_args(args)
end
return param
end
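-- When access_key/secret_key are configured, build the ak/data/signature
-- query parameters for nacos' accesskey authentication: the signature is a
-- base64-encoded HMAC-SHA1 of "<timestamp_ms>@@<group_name>@@<service_name>"
-- computed with the secret key.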
local function get_signed_param(group_name, service_name)
local param = ''
if access_key ~= '' and secret_key ~= '' then
local str_to_sign = ngx.now() * 1000 .. '@@' .. group_name .. '@@' .. service_name
local args = {
ak = access_key,
data = str_to_sign,
signature = ngx.encode_base64(ngx.hmac_sha1(secret_key, str_to_sign))
}
param = '&' .. ngx.encode_args(args)
end
return param
end
local function get_base_uri()
local host = local_conf.discovery.nacos.host
-- TODO Add health check to get healthy nodes.
local url = host[math_random(#host)]
local auth_idx = core.string.rfind_char(url, '@')
local username, password
if auth_idx then
local protocol_idx = str_find(url, '://')
local protocol = string_sub(url, 1, protocol_idx + 2)
local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1)
local arr = ngx_re.split(user_and_password, ':')
if #arr == 2 then
username = arr[1]
password = arr[2]
end
local other = string_sub(url, auth_idx + 1)
url = protocol .. other
end
if local_conf.discovery.nacos.prefix then
url = url .. local_conf.discovery.nacos.prefix
end
if str_byte(url, #url) ~= str_byte('/') then
url = url .. '/'
end
return url, username, password
end
local function de_duplication(services, namespace_id, group_name, service_name, scheme)
for _, service in ipairs(services) do
if service.namespace_id == namespace_id and service.group_name == group_name
and service.service_name == service_name and service.scheme == scheme then
return true
end
end
return false
end
local function iter_and_add_service(services, values)
if not values then
return
end
for _, value in core.config_util.iterate_values(values) do
local conf = value.value
if not conf then
goto CONTINUE
end
local up
if conf.upstream then
up = conf.upstream
else
up = conf
end
local namespace_id = (up.discovery_args and up.discovery_args.namespace_id)
or default_namespace_id
local group_name = (up.discovery_args and up.discovery_args.group_name)
or default_group_name
local dup = de_duplication(services, namespace_id, group_name,
up.service_name, up.scheme)
if dup then
goto CONTINUE
end
if up.discovery_type == 'nacos' then
core.table.insert(services, {
service_name = up.service_name,
namespace_id = namespace_id,
group_name = group_name,
scheme = up.scheme,
})
end
::CONTINUE::
end
end
local function get_nacos_services()
local services = {}
-- here we use lazy load to work around circular dependency
local get_upstreams = require('apisix.upstream').upstreams
local get_routes = require('apisix.router').http_routes
local get_stream_routes = require('apisix.router').stream_routes
local get_services = require('apisix.http.service').services
local values = get_upstreams()
iter_and_add_service(services, values)
values = get_routes()
iter_and_add_service(services, values)
values = get_services()
iter_and_add_service(services, values)
values = get_stream_routes()
iter_and_add_service(services, values)
return services
end
local function is_grpc(scheme)
if scheme == 'grpc' or scheme == 'grpcs' then
return true
end
return false
end
local curr_service_in_use = {}
local function fetch_full_registry(premature)
if premature then
return
end
local base_uri, username, password = get_base_uri()
local token_param, err = get_token_param(base_uri, username, password)
if err then
log.error('get_token_param error:', err)
return
end
local infos = get_nacos_services()
if #infos == 0 then
return
end
local service_names = {}
for _, service_info in ipairs(infos) do
local data, err
local namespace_id = service_info.namespace_id
local group_name = service_info.group_name
local scheme = service_info.scheme or ''
local namespace_param = get_namespace_param(service_info.namespace_id)
local group_name_param = get_group_name_param(service_info.group_name)
local signature_param = get_signed_param(service_info.group_name, service_info.service_name)
local query_path = instance_list_path .. service_info.service_name
.. token_param .. namespace_param .. group_name_param
.. signature_param
data, err = get_url(base_uri, query_path)
if err then
log.error('get_url:', query_path, ' err:', err)
goto CONTINUE
end
local nodes = {}
local key = get_key(namespace_id, group_name, service_info.service_name)
service_names[key] = true
for _, host in ipairs(data.hosts) do
local node = {
host = host.ip,
port = host.port,
weight = host.weight or default_weight,
}
-- docs: https://github.com/yidongnan/grpc-spring-boot-starter/pull/496
if is_grpc(scheme) and host.metadata and host.metadata.gRPC_port then
node.port = host.metadata.gRPC_port
end
core.table.insert(nodes, node)
end
if #nodes > 0 then
local content = core.json.encode(nodes)
nacos_dict:set(key, content)
end
::CONTINUE::
end
-- remove services that are not in use anymore
for key, _ in pairs(curr_service_in_use) do
if not service_names[key] then
nacos_dict:delete(key)
end
end
curr_service_in_use = service_names
end
function _M.nodes(service_name, discovery_args)
local namespace_id = discovery_args and
discovery_args.namespace_id or default_namespace_id
local group_name = discovery_args
and discovery_args.group_name or default_group_name
local key = get_key(namespace_id, group_name, service_name)
local value = nacos_dict:get(key)
if not value then
core.log.error("nacos service not found: ", service_name)
return nil
end
local nodes = core.json.decode(value)
return nodes
end
function _M.init_worker()
default_weight = local_conf.discovery.nacos.weight
log.info('default_weight:', default_weight)
local fetch_interval = local_conf.discovery.nacos.fetch_interval
log.info('fetch_interval:', fetch_interval)
access_key = local_conf.discovery.nacos.access_key
secret_key = local_conf.discovery.nacos.secret_key
ngx_timer_at(0, fetch_full_registry)
ngx_timer_every(fetch_interval, fetch_full_registry)
end
function _M.dump_data()
local keys = nacos_dict:get_keys(0)
local applications = {}
for _, key in ipairs(keys) do
local value = nacos_dict:get(key)
if value then
local nodes = core.json.decode(value)
if nodes then
applications[key] = {
nodes = nodes,
}
end
end
end
return {services = applications or {}}
end
return _M

View File

@@ -0,0 +1,59 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
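-- Validates the `discovery.nacos` section of the local config. A host entry
-- may embed basic-auth credentials, e.g. (illustrative values):
--
--   discovery:
--     nacos:
--       host:
--         - "http://nacos:nacos@127.0.0.1:8848"
--       prefix: "/nacos/v1/"
--       fetch_interval: 30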
local host_pattern = [[^http(s)?:\/\/([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]]
local prefix_pattern = [[^[\/a-zA-Z0-9-_.]+$]]
return {
type = 'object',
properties = {
host = {
type = 'array',
minItems = 1,
items = {
type = 'string',
pattern = host_pattern,
minLength = 2,
maxLength = 100,
},
},
fetch_interval = {type = 'integer', minimum = 1, default = 30},
prefix = {
type = 'string',
pattern = prefix_pattern,
maxLength = 100,
default = '/nacos/v1/'
},
weight = {type = 'integer', minimum = 1, default = 100},
timeout = {
type = 'object',
properties = {
connect = {type = 'integer', minimum = 1, default = 2000},
send = {type = 'integer', minimum = 1, default = 2000},
read = {type = 'integer', minimum = 1, default = 5000},
},
default = {
connect = 2000,
send = 2000,
read = 5000,
}
},
access_key = {type = 'string', default = ''},
secret_key = {type = 'string', default = ''},
},
required = {'host'}
}

View File

@@ -0,0 +1,367 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local format = string.format
local ipairs = ipairs
local error = error
local tonumber = tonumber
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local mysql = require("resty.mysql")
local is_http = ngx.config.subsystem == "http"
local process = require("ngx.process")
local endpoint_dict
local full_query_sql = [[ select servant, group_concat(endpoint order by endpoint) as endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where setting_state = 'active' and present_state = 'active'
group by servant ]]
local incremental_query_sql = [[
select servant, (setting_state = 'active' and present_state = 'active') activated,
group_concat(endpoint order by endpoint) endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where (application, server_name) in
(
select application, server_name from t_server_conf
where registry_timestamp > now() - interval %d second
union
select application, server_name from t_adapter_conf
where registry_timestamp > now() - interval %d second
)
group by servant, activated order by activated desc ]]
local _M = {
version = 0.1,
}
local default_weight
local last_fetch_full_time = 0
local last_db_error
local endpoint_lrucache = core.lrucache.new({
ttl = 300,
count = 1024
})
local activated_buffer = core.table.new(10, 0)
local nodes_buffer = core.table.new(0, 5)
--[[
endpoints format as follows:
tcp -h 172.16.1.1 -p 11 -t 6000 -e 0,tcp -e 0 -p 12 -h 172.16.1.1,tcp -p 13 -h 172.16.1.1
we extract host and port value via endpoints_pattern
--]]
local endpoints_pattern = core.table.concat(
{ [[tcp(\s*-[te]\s*(\S+)){0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+))]],
[[{0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+)){0,2}\s*(,|$)]] }
)
local function update_endpoint(servant, nodes)
local endpoint_content = core.json.encode(nodes, true)
local endpoint_version = ngx.crc32_long(endpoint_content)
core.log.debug("set servant ", servant, endpoint_content)
local _, err
_, err = endpoint_dict:safe_set(servant .. "#version", endpoint_version)
if err then
core.log.error("set endpoint version into nginx shared dict failed, ", err)
return
end
_, err = endpoint_dict:safe_set(servant, endpoint_content)
if err then
core.log.error("set endpoint into nginx shared dict failed, ", err)
endpoint_dict:delete(servant .. "#version")
end
end
local function delete_endpoint(servant)
core.log.info("delete servant ", servant)
endpoint_dict:delete(servant .. "#version")
endpoint_dict:delete(servant)
end
local function add_endpoint_to_lrucache(servant)
local endpoint_content, err = endpoint_dict:get_stale(servant)
if not endpoint_content then
core.log.error("get empty endpoint content, servant: ", servant, ", err: ", err)
return nil
end
local endpoint, err = core.json.decode(endpoint_content)
if not endpoint then
core.log.error("decode json failed, content: ", endpoint_content, ", err: ", err)
return nil
end
return endpoint
end
local function get_endpoint(servant)
--[[
fetch_full function will:
1: call endpoint_dict:flush_all()
2: setup servant:nodes pairs into endpoint_dict
3: call endpoint_dict:flush_expired()
get_endpoint may be called during step 2 of the fetch_full function,
so we must use endpoint_dict:get_stale() instead of endpoint_dict:get()
--]]
local endpoint_version, err = endpoint_dict:get_stale(servant .. "#version")
if not endpoint_version then
if err then
core.log.error("get empty endpoint version, servant: ", servant, ", err: ", err)
end
return nil
end
return endpoint_lrucache(servant, endpoint_version, add_endpoint_to_lrucache, servant)
end
local function extract_endpoint(query_result)
for _, p in ipairs(query_result) do
repeat
local servant = p.servant
if servant == ngx.null then
break
end
if p.activated == 1 then
activated_buffer[servant] = ngx.null
elseif p.activated == 0 then
if activated_buffer[servant] == nil then
delete_endpoint(servant)
end
break
end
core.table.clear(nodes_buffer)
local iterator = ngx.re.gmatch(p.endpoints, endpoints_pattern, "jao")
while true do
local captures, err = iterator()
if err then
core.log.error("gmatch failed, error: ", err, " , endpoints: ", p.endpoints)
break
end
if not captures then
break
end
local host, port
if captures[3] == "h" or captures[3] == "H" then
host = captures[4]
port = tonumber(captures[8])
else
host = captures[8]
port = tonumber(captures[4])
end
core.table.insert(nodes_buffer, {
host = host,
port = port,
weight = default_weight,
})
end
update_endpoint(servant, nodes_buffer)
until true
end
end
local function fetch_full(db_cli)
local res, err, errcode, sqlstate = db_cli:query(full_query_sql)
--[[
res format is as follows:
{
{
servant = "A.AServer.FirstObj",
endpoints = "tcp -h 172.16.1.1 -p 10001 -e 0 -t 3000,tcp -p 10002 -h 172.16.1.2 -t 3000"
},
{
servant = "A.AServer.SecondObj",
endpoints = "tcp -t 3000 -p 10002 -h 172.16.1.2"
},
}
if the current endpoint_dict is:
key1:nodes1, key2:nodes2, key3:nodes3
and fetch_full returns:
key1:nodes1, key4:nodes4, key5:nodes5
then we need to
1: set up key4:nodes4, key5:nodes5
2: delete key2:nodes2, key3:nodes3
to achieve this, we:
1: execute endpoint_dict:flush_all() before storing the results
2: execute endpoint_dict:flush_expired() after storing the results
--]]
if not res then
core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
return err
end
endpoint_dict:flush_all()
extract_endpoint(res)
while err == "again" do
res, err, errcode, sqlstate = db_cli:read_result()
if not res then
if err then
core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
end
return err
end
extract_endpoint(res)
end
endpoint_dict:flush_expired()
return nil
end
local function fetch_incremental(db_cli)
local res, err, errcode, sqlstate = db_cli:query(incremental_query_sql)
--[[
res is as follows:
{
{
activated=1,
servant = "A.AServer.FirstObj",
endpoints = "tcp -h 172.16.1.1 -p 10001 -e 0 -t 3000,tcp -p 10002 -h 172.16.1.2 -t 3000"
},
{
activated=0,
servant = "A.AServer.FirstObj",
endpoints = "tcp -t 3000 -p 10001 -h 172.16.1.3"
},
{
activated=0,
servant = "B.BServer.FirstObj",
endpoints = "tcp -t 3000 -p 10002 -h 172.16.1.2"
},
}
for each item:
if activated==1, set it up
if activated==0 and another item with the same servant has activated==1, ignore it
if activated==0 and no other item has the same servant, delete it
--]]
if not res then
core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
return err
end
core.table.clear(activated_buffer)
extract_endpoint(res)
while err == "again" do
res, err, errcode, sqlstate = db_cli:read_result()
if not res then
if err then
core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
end
return err
end
extract_endpoint(res)
end
return nil
end
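-- Periodic timer body: a full registry query runs after a previous DB error
-- or once every `full_fetch_interval` seconds; otherwise only the cheaper
-- incremental query (servers registered within the backtrack window) runs.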
local function fetch_endpoint(premature, conf)
if premature then
return
end
local db_cli, err = mysql:new()
if not db_cli then
core.log.error("failed to instantiate mysql: ", err)
return
end
db_cli:set_timeout(3000)
local ok, err, errcode, sqlstate = db_cli:connect(conf.db_conf)
if not ok then
core.log.error("failed to connect mysql: ", err, ", ", errcode, ", ", sqlstate)
return
end
local now = ngx.time()
if last_db_error or last_fetch_full_time + conf.full_fetch_interval <= now then
last_fetch_full_time = now
last_db_error = fetch_full(db_cli)
else
last_db_error = fetch_incremental(db_cli)
end
if not last_db_error then
db_cli:set_keepalive(120 * 1000, 1)
end
end
function _M.nodes(servant)
return get_endpoint(servant)
end
local function get_endpoint_dict()
local shm = "tars"
if not is_http then
shm = shm .. "-stream"
end
return ngx.shared[shm]
end
function _M.init_worker()
endpoint_dict = get_endpoint_dict()
if not endpoint_dict then
error("failed to get lua_shared_dict: tars, please check your APISIX version")
end
if process.type() ~= "privileged agent" then
return
end
local conf = local_conf.discovery.tars
default_weight = conf.default_weight
core.log.info("conf ", core.json.delay_encode(conf))
local backtrack_time = conf.incremental_fetch_interval + 5
incremental_query_sql = format(incremental_query_sql, backtrack_time, backtrack_time)
ngx.timer.at(0, fetch_endpoint, conf)
ngx.timer.every(conf.incremental_fetch_interval, fetch_endpoint, conf)
end
return _M

View File

@@ -0,0 +1,45 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
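-- Validates the `discovery.tars` section of the local config, e.g.
-- (illustrative values):
--
--   discovery:
--     tars:
--       db_conf:
--         host: "127.0.0.1"
--         port: 3306
--         database: "db_tars"
--         user: "root"
--         password: "secret"
--       full_fetch_interval: 300
--       incremental_fetch_interval: 15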
local host_pattern = [[^([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]]
return {
type = 'object',
properties = {
db_conf = {
type = 'object',
properties = {
host = { type = 'string', minLength = 1, maxLength = 500, pattern = host_pattern },
port = { type = 'integer', minimum = 1, maximum = 65535, default = 3306 },
database = { type = 'string', minLength = 1, maxLength = 64 },
user = { type = 'string', minLength = 1, maxLength = 64 },
password = { type = 'string', minLength = 1, maxLength = 64 },
},
required = { 'host', 'database', 'user', 'password' }
},
full_fetch_interval = {
type = 'integer', minimum = 90, maximum = 3600, default = 300,
},
incremental_fetch_interval = {
type = 'integer', minimum = 5, maximum = 60, default = 15,
},
default_weight = {
type = 'integer', minimum = 0, maximum = 100, default = 100,
},
},
required = { 'db_conf' }
}
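-- Illustration only (assumed values): a config.yaml snippet accepted by this
-- schema might look like:
--
--   discovery:
--     tars:
--       db_conf:
--         host: 127.0.0.1
--         database: db_tars
--         user: root
--         password: tars2022
--       full_fetch_interval: 90
--       incremental_fetch_interval: 5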

View File

@@ -0,0 +1,139 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local error = error
local assert = assert
local tostring = tostring
local pairs = pairs
local setmetatable = setmetatable
local ngx = ngx
local core = require("apisix.core")
local _M = {
events_module = nil,
}
_M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS = 'lua-resty-worker-events'
_M.EVENTS_MODULE_LUA_RESTY_EVENTS = 'lua-resty-events'
-- use lua-resty-worker-events
local function init_resty_worker_events()
_M.events_module = _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS
local we = require("resty.worker.events")
local shm = ngx.config.subsystem == "http" and "worker-events" or "worker-events-stream"
local ok, err = we.configure({shm = shm, interval = 0.1})
if not ok then
error("failed to init worker event: " .. err)
end
return we
end
-- use lua-resty-events
local function init_resty_events()
_M.events_module = _M.EVENTS_MODULE_LUA_RESTY_EVENTS
local listening = "unix:" .. ngx.config.prefix() .. "logs/"
if ngx.config.subsystem == "http" then
listening = listening .. "worker_events.sock"
else
listening = listening .. "stream_worker_events.sock"
end
core.log.info("subsystem: " .. ngx.config.subsystem .. " listening sock: " .. listening)
local opts = {
unique_timeout = 5, -- lifetime of unique event data in lrucache
broker_id = 0, -- broker server runs in nginx worker #0
listening = listening, -- unix socket for broker listening
}
local we = require("resty.events.compat")
assert(we.configure(opts))
assert(we.configured())
return we
end
function _M.init_worker()
if _M.inited then
-- prevent duplicate initializations in the same worker to
-- avoid potentially unexpected behavior
return
end
_M.inited = true
local conf = core.config.local_conf()
local module_name = core.table.try_read_attr(conf, "apisix", "events", "module")
or _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS
if module_name == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
-- use lua-resty-events as an event module via the apisix.events.module
-- key in the configuration file
_M.worker_events = init_resty_events()
else
-- use lua-resty-worker-events default now
_M.worker_events = init_resty_worker_events()
end
end
function _M.register(self, ...)
return self.worker_events.register(...)
end
function _M.event_list(self, source, ...)
-- a patch for lua-resty-events to support event_list
-- this snippet is copied from the lua-resty-worker-events lib
if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
local events = { _source = source }
for _, event in pairs({...}) do
events[event] = event
end
return setmetatable(events, {
__index = function(_, key)
error("event '"..tostring(key).."' is an unknown event", 2)
end
})
end
-- the lua-resty-worker-events has a built-in event_list implementation
return self.worker_events.event_list(source, ...)
end
function _M.post(self, ...)
return self.worker_events.post(...)
end
-- NOTE: "modele" is a historical misspelling of "module"; the name is kept
-- as-is so that existing callers of this function keep working
function _M.get_healthcheck_events_modele(self)
if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
return "resty.events"
else
return "resty.worker.events"
end
end
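-- Illustration only (assumed call sites): after init_worker() has run,
-- callers use colon syntax so that `self` resolves to this module:
--
--   local events = require("apisix.events")
--   events:register(handler, "source", "event_name")
--   events:post("source", "event_name", data)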
return _M

View File

@@ -0,0 +1,56 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugin_checker = require("apisix.plugin").plugin_checker
local error = error
local _M = {}
local global_rules
function _M.init_worker()
local err
global_rules, err = core.config.new("/global_rules", {
automatic = true,
item_schema = core.schema.global_rule,
checker = plugin_checker,
})
if not global_rules then
error("failed to create etcd instance for fetching /global_rules : "
.. err)
end
end
function _M.global_rules()
if not global_rules then
return nil, nil
end
return global_rules.values, global_rules.conf_version
end
function _M.get_pre_index()
if not global_rules then
return nil
end
return global_rules.prev_index
end
return _M

View File

@@ -0,0 +1,153 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local radixtree = require("resty.radixtree")
local router = require("apisix.utils.router")
local service_fetch = require("apisix.http.service").get
local core = require("apisix.core")
local expr = require("resty.expr.v1")
local plugin_checker = require("apisix.plugin").plugin_checker
local event = require("apisix.core.event")
local ipairs = ipairs
local type = type
local error = error
local loadstring = loadstring
local _M = {}
function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter)
routes = routes or {}
core.table.clear(uri_routes)
for _, route in ipairs(routes) do
if type(route) == "table" then
local status = core.table.try_read_attr(route, "value", "status")
-- check the status
if status == 0 then
goto CONTINUE
end
local filter_fun, err
if route.value.filter_func then
filter_fun, err = loadstring(
"return " .. route.value.filter_func,
"router#" .. route.value.id)
if not filter_fun then
core.log.error("failed to load filter function: ", err,
" route id: ", route.value.id)
goto CONTINUE
end
filter_fun = filter_fun()
end
local hosts = route.value.hosts or route.value.host
if not hosts and route.value.service_id then
local service = service_fetch(route.value.service_id)
if not service then
core.log.error("failed to fetch service configuration by ",
"id: ", route.value.service_id)
-- we keep the behavior that missing service won't affect the route matching
else
hosts = service.value.hosts
end
end
core.log.info("insert uri route: ",
core.json.delay_encode(route.value, true))
core.table.insert(uri_routes, {
paths = route.value.uris or route.value.uri,
methods = route.value.methods,
priority = route.value.priority,
hosts = hosts,
remote_addrs = route.value.remote_addrs
or route.value.remote_addr,
vars = route.value.vars,
filter_fun = filter_fun,
handler = function (api_ctx, match_opts)
api_ctx.matched_params = nil
api_ctx.matched_route = route
api_ctx.curr_req_matched = match_opts.matched
end
})
::CONTINUE::
end
end
event.push(event.CONST.BUILD_ROUTER, routes)
core.log.info("route items: ", core.json.delay_encode(uri_routes, true))
if with_parameter then
return radixtree.new(uri_routes)
else
return router.new(uri_routes)
end
end
function _M.match_uri(uri_router, api_ctx)
local match_opts = core.tablepool.fetch("route_match_opts", 0, 4)
match_opts.method = api_ctx.var.request_method
match_opts.host = api_ctx.var.host
match_opts.remote_addr = api_ctx.var.remote_addr
match_opts.vars = api_ctx.var
match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)
local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
core.tablepool.release("route_match_opts", match_opts)
return ok
end
-- additional check for synced route configuration, run after schema check
local function check_route(route)
local ok, err = plugin_checker(route)
if not ok then
return nil, err
end
if route.vars then
ok, err = expr.new(route.vars)
if not ok then
return nil, "failed to validate the 'vars' expression: " .. err
end
end
return true
end
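-- Illustration only (hypothetical route fragment): a `vars` condition that
-- passes the resty.expr check above could be
--   vars = {{"arg_version", "==", "v2"}, {"http_x_debug", "~~", "on.*"}}
-- where each triple is {variable, operator, value} in resty.expr v1 syntax.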
function _M.init_worker(filter)
local user_routes, err = core.config.new("/routes", {
automatic = true,
item_schema = core.schema.route,
checker = check_route,
filter = filter,
})
if not user_routes then
error("failed to create etcd instance for fetching /routes : " .. err)
end
return user_routes
end
return _M

View File

@@ -0,0 +1,193 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local core = require("apisix.core")
local event = require("apisix.core.event")
local get_services = require("apisix.http.service").services
local service_fetch = require("apisix.http.service").get
local ipairs = ipairs
local type = type
local tab_insert = table.insert
local loadstring = loadstring
local pairs = pairs
local cached_router_version
local cached_service_version
local host_router
local only_uri_router
local _M = {version = 0.1}
local function push_host_router(route, host_routes, only_uri_routes)
if type(route) ~= "table" then
return
end
local filter_fun, err
if route.value.filter_func then
filter_fun, err = loadstring(
"return " .. route.value.filter_func,
"router#" .. route.value.id)
if not filter_fun then
core.log.error("failed to load filter function: ", err,
" route id: ", route.value.id)
return
end
filter_fun = filter_fun()
end
local hosts = route.value.hosts
if not hosts then
if route.value.host then
hosts = {route.value.host}
elseif route.value.service_id then
local service = service_fetch(route.value.service_id)
if not service then
core.log.error("failed to fetch service configuration by ",
"id: ", route.value.service_id)
-- we keep the behavior that missing service won't affect the route matching
else
hosts = service.value.hosts
end
end
end
local radixtree_route = {
paths = route.value.uris or route.value.uri,
methods = route.value.methods,
priority = route.value.priority,
remote_addrs = route.value.remote_addrs
or route.value.remote_addr,
vars = route.value.vars,
filter_fun = filter_fun,
handler = function (api_ctx, match_opts)
api_ctx.matched_params = nil
api_ctx.matched_route = route
api_ctx.curr_req_matched = match_opts.matched
api_ctx.real_curr_req_matched_path = match_opts.matched._path
end
}
if hosts == nil then
core.table.insert(only_uri_routes, radixtree_route)
return
end
for _, host in ipairs(hosts) do
local host_rev = host:reverse()
if not host_routes[host_rev] then
host_routes[host_rev] = {radixtree_route}
else
tab_insert(host_routes[host_rev], radixtree_route)
end
end
end
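-- Illustration only (hypothetical host): hosts are stored reversed so that
-- suffix-oriented host matching becomes prefix matching in the radixtree;
-- "www.example.com" is inserted as "moc.elpmaxe.www", and _M.matching below
-- reverses the request host the same way before dispatching.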
local function create_radixtree_router(routes)
local host_routes = {}
local only_uri_routes = {}
host_router = nil
routes = routes or {}
for _, route in ipairs(routes) do
local status = core.table.try_read_attr(route, "value", "status")
-- check the status
if not status or status == 1 then
push_host_router(route, host_routes, only_uri_routes)
end
end
-- create router: host_router
local host_router_routes = {}
for host_rev, routes in pairs(host_routes) do
local sub_router = router.new(routes)
core.table.insert(host_router_routes, {
paths = host_rev,
filter_fun = function(vars, opts, ...)
return sub_router:dispatch(vars.uri, opts, ...)
end,
handler = function (api_ctx, match_opts)
api_ctx.real_curr_req_matched_host = match_opts.matched._path
end
})
end
event.push(event.CONST.BUILD_ROUTER, routes)
if #host_router_routes > 0 then
host_router = router.new(host_router_routes)
end
-- create router: only_uri_router
only_uri_router = router.new(only_uri_routes)
return true
end
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
if not cached_router_version or cached_router_version ~= user_routes.conf_version
or not cached_service_version or cached_service_version ~= service_version
then
create_radixtree_router(user_routes.values)
cached_router_version = user_routes.conf_version
cached_service_version = service_version
end
return _M.matching(api_ctx)
end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_host_uri")
local match_opts = core.tablepool.fetch("route_match_opts", 0, 16)
match_opts.method = api_ctx.var.request_method
match_opts.remote_addr = api_ctx.var.remote_addr
match_opts.vars = api_ctx.var
match_opts.host = api_ctx.var.host
match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)
if host_router then
local host_uri = api_ctx.var.host
local ok = host_router:dispatch(host_uri:reverse(), match_opts, api_ctx, match_opts)
if ok then
if api_ctx.real_curr_req_matched_path then
api_ctx.curr_req_matched._path = api_ctx.real_curr_req_matched_path
api_ctx.real_curr_req_matched_path = nil
end
if api_ctx.real_curr_req_matched_host then
api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse()
api_ctx.real_curr_req_matched_host = nil
end
core.tablepool.release("route_match_opts", match_opts)
return true
end
end
local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
core.tablepool.release("route_match_opts", match_opts)
return ok
end
return _M

View File

@@ -0,0 +1,57 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local base_router = require("apisix.http.route")
local get_services = require("apisix.http.service").services
local cached_router_version
local cached_service_version
local _M = {version = 0.2}
local uri_routes = {}
local uri_router
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
if not cached_router_version or cached_router_version ~= user_routes.conf_version
or not cached_service_version or cached_service_version ~= service_version
then
uri_router = base_router.create_radixtree_uri_router(user_routes.values,
uri_routes, false)
cached_router_version = user_routes.conf_version
cached_service_version = service_version
end
if not uri_router then
core.log.error("failed to fetch valid `uri` router: ")
return true
end
return _M.matching(api_ctx)
end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri")
return base_router.match_uri(uri_router, api_ctx)
end
return _M

View File

@@ -0,0 +1,57 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local base_router = require("apisix.http.route")
local get_services = require("apisix.http.service").services
local cached_router_version
local cached_service_version
local _M = {}
local uri_routes = {}
local uri_router
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
if not cached_router_version or cached_router_version ~= user_routes.conf_version
or not cached_service_version or cached_service_version ~= service_version
then
uri_router = base_router.create_radixtree_uri_router(user_routes.values,
uri_routes, true)
cached_router_version = user_routes.conf_version
cached_service_version = service_version
end
if not uri_router then
core.log.error("failed to fetch valid `uri_with_parameter` router: ")
return true
end
return _M.matching(api_ctx)
end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri_with_parameter")
return base_router.match_uri(uri_router, api_ctx)
end
return _M

View File

@@ -0,0 +1,70 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local apisix_upstream = require("apisix.upstream")
local plugin_checker = require("apisix.plugin").plugin_checker
local services
local error = error
local _M = {
version = 0.2,
}
function _M.get(service_id)
return services:get(service_id)
end
function _M.services()
if not services then
return nil, nil
end
return services.values, services.conf_version
end
local function filter(service)
service.has_domain = false
if not service.value then
return
end
apisix_upstream.filter_upstream(service.value.upstream, service)
core.log.info("filter service: ", core.json.delay_encode(service, true))
end
function _M.init_worker()
local err
services, err = core.config.new("/services", {
automatic = true,
item_schema = core.schema.service,
checker = plugin_checker,
filter = filter,
})
if not services then
error("failed to create etcd instance for fetching /services: " .. err)
return
end
end
return _M

View File

@@ -0,0 +1,143 @@
//
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
option java_package = "org.apache.apisix.api.pubsub";
option java_outer_classname = "PubSubProto";
option java_multiple_files = true;
option go_package = "github.com/apache/apisix/api/pubsub;pubsub";
/**
* Ping command, used to keep the websocket connection alive
*
* The state field is used to pass some non-specific information,
* which will be returned in the pong response as is.
*/
message CmdPing {
bytes state = 1;
}
/**
* An empty command, a placeholder for testing purposes only
*/
message CmdEmpty {}
/**
* Get the offset of the specified topic partition from Apache Kafka.
*/
message CmdKafkaListOffset {
string topic = 1;
int32 partition = 2;
int64 timestamp = 3;
}
/**
* Fetch messages of the specified topic partition from Apache Kafka.
*/
message CmdKafkaFetch {
string topic = 1;
int32 partition = 2;
int64 offset = 3;
}
/**
* Client request definition for pubsub scenarios
*
* The sequence field is used to associate requests and responses.
* Apache APISIX will set a consistent sequence for the associated
* requests and responses, and the client can explicitly know the
* response corresponding to any of the requests.
*
* The req field is the command data sent by the client, and its
* type will be chosen from any of the lists in the definition.
*
* Field numbers 1 to 30 in the definition are used to define basic
* information and future extensions, and numbers after 30 are used
* to define commands.
*/
message PubSubReq {
int64 sequence = 1;
oneof req {
CmdEmpty cmd_empty = 31;
CmdPing cmd_ping = 32;
CmdKafkaFetch cmd_kafka_fetch = 33;
CmdKafkaListOffset cmd_kafka_list_offset = 34;
};
}
/**
* The response body of the service when an error occurs,
* containing the error code and the error message.
*/
message ErrorResp {
int32 code = 1;
string message = 2;
}
/**
* Pong response, the state field will pass through the
* value in the Ping command field.
*/
message PongResp {
bytes state = 1;
}
/**
* The definition of a message in Kafka with the current message
* offset, production timestamp, Key, and message content.
*/
message KafkaMessage {
int64 offset = 1;
int64 timestamp = 2;
bytes key = 3;
bytes value = 4;
}
/**
* The response of Fetch messages from Apache Kafka.
*/
message KafkaFetchResp {
repeated KafkaMessage messages = 1;
}
/**
* The response of list offset from Apache Kafka.
*/
message KafkaListOffsetResp {
int64 offset = 1;
}
/**
* Server response definition for pubsub scenarios
*
* The sequence field will be the same as the value in the
* request, which is used to associate the associated request
* and response.
*
* The resp field is the response data sent by the server, and
* its type will be chosen from any of the lists in the definition.
*/
message PubSubResp {
int64 sequence = 1;
oneof resp {
ErrorResp error_resp = 31;
PongResp pong_resp = 32;
KafkaFetchResp kafka_fetch_resp = 33;
KafkaListOffsetResp kafka_list_offset_resp = 34;
};
}
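// Illustration only (hypothetical exchange): a keep-alive round trip built
// from the messages above:
//   request:  PubSubReq  { sequence: 1, cmd_ping:  { state: "s" } }
//   response: PubSubResp { sequence: 1, pong_resp: { state: "s" } }
// The shared sequence ties the response back to its request.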

File diff suppressed because it is too large

View File

@@ -0,0 +1,163 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local string_format = string.format
local debug = debug
local ipairs = ipairs
local pcall = pcall
local table_insert = table.insert
local jit = jit
local _M = {}
local hooks = {}
function _M.getname(n)
if n.what == "C" then
return n.name
end
local lc = string_format("%s:%d", n.short_src, n.currentline)
if n.what ~= "main" and n.namewhat ~= "" then
return string_format("%s (%s)", lc, n.name)
else
return lc
end
end
local function hook(_, arg)
local level = 2
local finfo = debug.getinfo(level, "nSlf")
local key = finfo.source .. "#" .. arg
local hooks2 = {}
local removed_hooks = {}
for _, hook in ipairs(hooks) do
if key:sub(-#hook.key) == hook.key then
local filter_func = hook.filter_func
local info = {finfo = finfo, uv = {}, vals = {}}
-- upvalues
local i = 1
while true do
local name, value = debug.getupvalue(finfo.func, i)
if name == nil then break end
if name:sub(1, 1) ~= "(" then
info.uv[name] = value
end
i = i + 1
end
-- local values
local i = 1
while true do
local name, value = debug.getlocal(level, i)
if not name then break end
if name:sub(1, 1) ~= "(" then
info.vals[name] = value
end
i = i + 1
end
local r1, r2_or_err = pcall(filter_func, info)
if not r1 then
core.log.error("inspect: pcall filter_func:", r2_or_err)
table_insert(removed_hooks, hook)
elseif r2_or_err == false then
-- if filter_func returns false, keep the hook
table_insert(hooks2, hook)
else
table_insert(removed_hooks, hook)
end
else
-- key not match, keep the hook
table_insert(hooks2, hook)
end
end
for _, hook in ipairs(removed_hooks) do
core.log.warn("inspect: remove hook: ", hook.key)
end
-- disable debug mode if all hooks done
if #hooks2 ~= #hooks then
hooks = hooks2
if #hooks == 0 then
core.log.warn("inspect: all hooks removed")
debug.sethook()
if jit then
jit.on()
end
end
end
end
function _M.set_hook(file, line, func, filter_func)
if file == nil then
file = "=stdin"
end
local key = file .. "#" .. line
table_insert(hooks, {key = key, filter_func = filter_func})
if jit then
jit.flush(func)
jit.off()
end
debug.sethook(hook, "l")
end
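-- Illustration only (hypothetical file/line target): a hook that dumps a
-- local variable once and is then removed (its filter_func returns true):
--
--   local dbg = require("apisix.inspect.dbg")
--   dbg.set_hook("limit-req.lua", 88,
--                require("apisix.plugins.limit-req").access,
--                function(info)
--                    core.log.warn("conf: ", core.json.encode(info.vals.conf))
--                    return true
--                end)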
function _M.unset_hook(file, line)
if file == nil then
file = "=stdin"
end
local hooks2 = {}
local key = file .. "#" .. line
for _, hook in ipairs(hooks) do
if hook.key ~= key then
table_insert(hooks2, hook)
end
end
if #hooks2 ~= #hooks then
hooks = hooks2
if #hooks == 0 then
debug.sethook()
if jit then
jit.on()
end
end
end
end
function _M.unset_all()
if #hooks > 0 then
hooks = {}
debug.sethook()
if jit then
jit.on()
end
end
end
function _M.hooks()
return hooks
end
return _M

View File

@@ -0,0 +1,128 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local dbg = require("apisix.inspect.dbg")
local lfs = require("lfs")
local pl_path = require("pl.path")
local io = io
local table_insert = table.insert
local pcall = pcall
local ipairs = ipairs
local os = os
local ngx = ngx
local loadstring = loadstring
local format = string.format
local _M = {}
local last_modified = 0
local stop = false
local running = false
local last_report_time = 0
local REPORT_INTERVAL = 30 -- secs
local function run_lua_file(file)
local f, err = io.open(file, "rb")
if not f then
return false, err
end
local code, err = f:read("*all")
f:close()
if code == nil then
return false, format("cannot read hooks file: %s", err)
end
local func, err = loadstring(code)
if not func then
return false, err
end
func()
return true
end
local function setup_hooks(file)
if pl_path.exists(file) then
dbg.unset_all()
local _, err = pcall(run_lua_file, file)
local hooks = {}
for _, hook in ipairs(dbg.hooks()) do
table_insert(hooks, hook.key)
end
core.log.warn("set hooks: err: ", err, ", hooks: ", core.json.delay_encode(hooks))
end
end
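-- Illustration only: the hooks file is plain Lua executed by run_lua_file(),
-- so it typically contains nothing but dbg.set_hook(...) calls (see the
-- sketch in apisix/inspect/dbg.lua); creating, changing, or removing the
-- file takes effect within `delay` seconds via reload_hooks() below.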
local function reload_hooks(premature, delay, file)
if premature or stop then
stop = false
running = false
return
end
local time, err = lfs.attributes(file, 'modification')
if err then
if last_modified ~= 0 then
core.log.info(err, ", disable all hooks")
dbg.unset_all()
last_modified = 0
end
elseif time ~= last_modified then
setup_hooks(file)
last_modified = time
else
local ts = os.time()
if ts - last_report_time >= REPORT_INTERVAL then
local hooks = {}
for _, hook in ipairs(dbg.hooks()) do
table_insert(hooks, hook.key)
end
core.log.info("alive hooks: ", core.json.encode(hooks))
last_report_time = ts
end
end
local ok, err = ngx.timer.at(delay, reload_hooks, delay, file)
if not ok then
core.log.error("failed to create the timer: ", err)
running = false
end
end
function _M.init(delay, file)
if not running then
file = file or "/usr/local/apisix/plugin_inspect_hooks.lua"
delay = delay or 3
setup_hooks(file)
local ok, err = ngx.timer.at(delay, reload_hooks, delay, file)
if not ok then
core.log.error("failed to create the timer: ", err)
return
end
running = true
end
end
function _M.destroy()
stop = true
end
return _M

Some files were not shown because too many files have changed in this diff