feat(apisix): add Cloudron package
- Implements Apache APISIX packaging for the Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI

Co-Authored-By: Gemini <noreply@google.com>
@@ -0,0 +1,691 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local core_sleep = require("apisix.core.utils").sleep
local resty_consul = require('resty.consul')
local http = require('resty.http')
local util = require("apisix.cli.util")
local ipairs = ipairs
local error = error
local ngx = ngx
local unpack = unpack
local tonumber = tonumber
local pairs = pairs
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local log = core.log
local json_delay_encode = core.json.delay_encode
local ngx_worker_id = ngx.worker.id
local exiting = ngx.worker.exiting
local thread_spawn = ngx.thread.spawn
local thread_wait = ngx.thread.wait
local thread_kill = ngx.thread.kill
local math_random = math.random
local pcall = pcall
local null = ngx.null
local type = type
local next = next

local all_services = core.table.new(0, 5)
local default_service
local default_weight
local sort_type
local skip_service_map = core.table.new(0, 1)
local dump_params

local events
local events_list
local consul_services

local default_skip_services = {"consul"}
local default_random_range = 5
local default_catalog_error_index = -1
local default_health_error_index = -2
local watch_type_catalog = 1
local watch_type_health = 2
local max_retry_time = 256

local _M = {
    version = 0.3,
}


local function discovery_consul_callback(data, event, source, pid)
    all_services = data
    log.notice("update local variable all_services, event is: ", event,
               "source: ", source, "server pid:", pid,
               ", all services: ", json_delay_encode(all_services, true))
end


function _M.all_nodes()
    return all_services
end


function _M.nodes(service_name)
    if not all_services then
        log.error("all_services is nil, failed to fetch nodes for : ", service_name)
        return
    end

    local resp_list = all_services[service_name]

    if not resp_list then
        log.error("fetch nodes failed by ", service_name, ", return default service")
        return default_service and {default_service}
    end

    log.info("process id: ", ngx_worker_id(), ", all_services[", service_name, "] = ",
             json_delay_encode(resp_list, true))

    return resp_list
end


local function update_all_services(consul_server_url, up_services)
    -- clean old unused data
    local old_services = consul_services[consul_server_url] or {}
    for k, _ in pairs(old_services) do
        all_services[k] = nil
    end
    core.table.clear(old_services)

    for k, v in pairs(up_services) do
        all_services[k] = v
    end
    consul_services[consul_server_url] = up_services

    log.info("update all services: ", json_delay_encode(all_services, true))
end


local function read_dump_services()
    local data, err = util.read_file(dump_params.path)
    if not data then
        log.error("read dump file get error: ", err)
        return
    end

    log.info("read dump file: ", data)
    data = util.trim(data)
    if #data == 0 then
        log.error("dump file is empty")
        return
    end

    local entity, err = core.json.decode(data)
    if not entity then
        log.error("decoded dump data got error: ", err, ", file content: ", data)
        return
    end

    if not entity.services or not entity.last_update then
        log.warn("decoded dump data miss fields, file content: ", data)
        return
    end

    local now_time = ngx.time()
    log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ",
             dump_params.expire, ", now_time: ", now_time)
    if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
        log.warn("dump file: ", dump_params.path, " had expired, ignored it")
        return
    end

    all_services = entity.services
    log.info("load dump file into memory success")
end


local function write_dump_services()
    local entity = {
        services = all_services,
        last_update = ngx.time(),
        expire = dump_params.expire, -- later need handle it
    }
    local data = core.json.encode(entity)
    local succ, err = util.write_file(dump_params.path, data)
    if not succ then
        log.error("write dump into file got error: ", err)
    end
end


local function show_dump_file()
    if not dump_params then
        return 503, "dump params is nil"
    end

    local data, err = util.read_file(dump_params.path)
    if not data then
        return 503, err
    end

    return 200, data
end


local function get_retry_delay(retry_delay)
    if not retry_delay or retry_delay >= max_retry_time then
        retry_delay = 1
    else
        retry_delay = retry_delay * 4
    end

    return retry_delay
end
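
-- Illustrative note (not part of the upstream file): get_retry_delay()
-- implements a bounded exponential backoff. Starting from nil the successive
-- delays are 1, 4, 16, 64 and 256 seconds; once the delay reaches
-- max_retry_time (256) the next call wraps back to 1 second:
--
--     local d = get_retry_delay(nil)   -- 1
--     d = get_retry_delay(d)           -- 4
--     d = get_retry_delay(d)           -- 16
--     d = get_retry_delay(d)           -- 64
--     d = get_retry_delay(d)           -- 256
--     d = get_retry_delay(d)           -- 1 again (256 >= max_retry_time)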


local function get_opts(consul_server, is_catalog)
    local opts = {
        host = consul_server.host,
        port = consul_server.port,
        connect_timeout = consul_server.connect_timeout,
        read_timeout = consul_server.read_timeout,
        default_args = {
            token = consul_server.token,
        }
    }
    if not consul_server.keepalive then
        return opts
    end

    opts.default_args.wait = consul_server.wait_timeout -- blocked wait != 0; unblocked by wait = 0

    if is_catalog then
        opts.default_args.index = consul_server.catalog_index
    else
        opts.default_args.index = consul_server.health_index
    end

    return opts
end


local function watch_catalog(consul_server)
    local client = resty_consul:new(get_opts(consul_server, true))

    ::RETRY::
    local watch_result, watch_err = client:get(consul_server.consul_watch_catalog_url)
    local watch_error_info = (watch_err ~= nil and watch_err)
                             or ((watch_result ~= nil and watch_result.status ~= 200)
                             and watch_result.status)
    if watch_error_info then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_catalog_url,
            ", got watch result: ", json_delay_encode(watch_result),
            ", with error: ", watch_error_info)

        return watch_type_catalog, default_catalog_error_index
    end

    if consul_server.catalog_index > 0
        and consul_server.catalog_index == tonumber(watch_result.headers['X-Consul-Index']) then
        local random_delay = math_random(default_random_range)
        log.info("watch catalog has no change, re-watch consul after ", random_delay, " seconds")
        core_sleep(random_delay)
        goto RETRY
    end

    return watch_type_catalog, watch_result.headers['X-Consul-Index']
end
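
-- Illustrative note (not part of the upstream file): watch_catalog() relies on
-- Consul's blocking-query protocol. get_opts() passes the last seen
-- X-Consul-Index as the "index" default argument, so the GET against
-- /v1/catalog/services hangs for up to the configured "wait" timeout and
-- returns early only when the index changes. When Consul still answers with an
-- unchanged index, the loop above sleeps a random 1..5 seconds
-- (default_random_range) before re-watching, to avoid a tight retry storm.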


local function watch_health(consul_server)
    local client = resty_consul:new(get_opts(consul_server, false))

    ::RETRY::
    local watch_result, watch_err = client:get(consul_server.consul_watch_health_url)
    local watch_error_info = (watch_err ~= nil and watch_err)
                             or ((watch_result ~= nil and watch_result.status ~= 200)
                             and watch_result.status)
    if watch_error_info then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_health_url,
            ", got watch result: ", json_delay_encode(watch_result),
            ", with error: ", watch_error_info)

        return watch_type_health, default_health_error_index
    end

    if consul_server.health_index > 0
        and consul_server.health_index == tonumber(watch_result.headers['X-Consul-Index']) then
        local random_delay = math_random(default_random_range)
        log.info("watch health has no change, re-watch consul after ", random_delay, " seconds")
        core_sleep(random_delay)
        goto RETRY
    end

    return watch_type_health, watch_result.headers['X-Consul-Index']
end


local function check_keepalive(consul_server, retry_delay)
    if consul_server.keepalive and not exiting() then
        local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
        if not ok then
            log.error("create ngx_timer_at got error: ", err)
            return
        end
    end
end


local function update_index(consul_server, catalog_index, health_index)
    local c_index = 0
    local h_index = 0
    if catalog_index ~= nil then
        c_index = tonumber(catalog_index)
    end

    if health_index ~= nil then
        h_index = tonumber(health_index)
    end

    if c_index > 0 then
        consul_server.catalog_index = c_index
    end

    if h_index > 0 then
        consul_server.health_index = h_index
    end
end


local function is_not_empty(value)
    if value == nil or value == null
        or (type(value) == "table" and not next(value))
        or (type(value) == "string" and value == "")
    then
        return false
    end

    return true
end


local function watch_result_is_valid(watch_type, index, catalog_index, health_index)
    if index <= 0 then
        return false
    end

    if watch_type == watch_type_catalog then
        if index == catalog_index then
            return false
        end
    else
        if index == health_index then
            return false
        end
    end

    return true
end


local function combine_sort_nodes_cmp(left, right)
    if left.host ~= right.host then
        return left.host < right.host
    end

    return left.port < right.port
end


local function port_sort_nodes_cmp(left, right)
    return left.port < right.port
end


local function host_sort_nodes_cmp(left, right)
    return left.host < right.host
end


function _M.connect(premature, consul_server, retry_delay)
    if premature then
        return
    end

    local catalog_thread, spawn_catalog_err = thread_spawn(watch_catalog, consul_server)
    if not catalog_thread then
        local random_delay = math_random(default_random_range)
        log.error("failed to spawn thread watch catalog: ", spawn_catalog_err,
            ", retry connecting consul after ", random_delay, " seconds")
        core_sleep(random_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    local health_thread, err = thread_spawn(watch_health, consul_server)
    if not health_thread then
        thread_kill(catalog_thread)
        local random_delay = math_random(default_random_range)
        log.error("failed to spawn thread watch health: ", err, ", retry connecting consul after ",
            random_delay, " seconds")
        core_sleep(random_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    local thread_wait_ok, watch_type, index = thread_wait(catalog_thread, health_thread)
    thread_kill(catalog_thread)
    thread_kill(health_thread)
    if not thread_wait_ok then
        local random_delay = math_random(default_random_range)
        log.error("failed to wait thread: ", watch_type, ", retry connecting consul after ",
            random_delay, " seconds")
        core_sleep(random_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    -- double check that the index has changed
    if not watch_result_is_valid(tonumber(watch_type),
        tonumber(index), consul_server.catalog_index, consul_server.health_index) then
        retry_delay = get_retry_delay(retry_delay)
        log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
        core_sleep(retry_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    local consul_client = resty_consul:new({
        host = consul_server.host,
        port = consul_server.port,
        connect_timeout = consul_server.connect_timeout,
        read_timeout = consul_server.read_timeout,
        default_args = {
            token = consul_server.token
        }
    })
    local catalog_success, catalog_res, catalog_err = pcall(function()
        return consul_client:get(consul_server.consul_watch_catalog_url)
    end)
    if not catalog_success then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_catalog_url,
            ", got catalog result: ", json_delay_encode(catalog_res))
        check_keepalive(consul_server, retry_delay)
        return
    end
    local catalog_error_info = (catalog_err ~= nil and catalog_err)
                               or ((catalog_res ~= nil and catalog_res.status ~= 200)
                               and catalog_res.status)
    if catalog_error_info then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_catalog_url,
            ", got catalog result: ", json_delay_encode(catalog_res),
            ", with error: ", catalog_error_info)

        retry_delay = get_retry_delay(retry_delay)
        log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
        core_sleep(retry_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    -- get health index
    local success, health_res, health_err = pcall(function()
        return consul_client:get(consul_server.consul_watch_health_url)
    end)
    if not success then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_health_url,
            ", got health result: ", json_delay_encode(health_res))
        check_keepalive(consul_server, retry_delay)
        return
    end
    local health_error_info = (health_err ~= nil and health_err)
                              or ((health_res ~= nil and health_res.status ~= 200)
                              and health_res.status)
    if health_error_info then
        log.error("connect consul: ", consul_server.consul_server_url,
            " by sub url: ", consul_server.consul_watch_health_url,
            ", got health result: ", json_delay_encode(health_res),
            ", with error: ", health_error_info)

        retry_delay = get_retry_delay(retry_delay)
        log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
        core_sleep(retry_delay)

        check_keepalive(consul_server, retry_delay)
        return
    end

    log.info("connect consul: ", consul_server.consul_server_url,
        ", catalog_result status: ", catalog_res.status,
        ", catalog_result.headers.index: ", catalog_res.headers['X-Consul-Index'],
        ", consul_server.index: ", consul_server.index,
        ", consul_server: ", json_delay_encode(consul_server))

    -- if the current index is different from the last index, then update the service
    if (consul_server.catalog_index ~= tonumber(catalog_res.headers['X-Consul-Index']))
        or (consul_server.health_index ~= tonumber(health_res.headers['X-Consul-Index'])) then
        local up_services = core.table.new(0, #catalog_res.body)
        for service_name, _ in pairs(catalog_res.body) do
            -- check if the service_name is a 'skip service'
            if skip_service_map[service_name] then
                goto CONTINUE
            end

            -- get the nodes of the service
            local svc_url = consul_server.consul_sub_url .. "/" .. service_name
            local svc_success, result, get_err = pcall(function()
                return consul_client:get(svc_url, {passing = true})
            end)
            local error_info = (get_err ~= nil and get_err) or
                               ((result ~= nil and result.status ~= 200) and result.status)
            if not svc_success or error_info then
                log.error("connect consul: ", consul_server.consul_server_url,
                    ", by service url: ", svc_url, ", with error: ", error_info)
                goto CONTINUE
            end

            -- decode body, decode json, update service, error handling
            -- check that the result body is not nil and not empty
            if is_not_empty(result.body) then
                -- add services to table
                local nodes = up_services[service_name]
                local nodes_uniq = {}
                for _, node in ipairs(result.body) do
                    if not node.Service then
                        goto CONTINUE
                    end

                    local svc_address, svc_port = node.Service.Address, node.Service.Port
                    -- handle the nil or zero port case: default to 80 for HTTP services
                    if not svc_port or svc_port == 0 then
                        svc_port = 80
                    end
                    -- if nodes is nil, create a new nodes table and set it on up_services
                    if not nodes then
                        nodes = core.table.new(1, 0)
                        up_services[service_name] = nodes
                    end
                    -- do not store duplicate service IDs
                    local service_id = svc_address .. ":" .. svc_port
                    if not nodes_uniq[service_id] then
                        -- add the node to the nodes table
                        core.table.insert(nodes, {
                            host = svc_address,
                            port = tonumber(svc_port),
                            weight = default_weight,
                        })
                        nodes_uniq[service_id] = true
                    end
                end
                if nodes then
                    if sort_type == "port_sort" then
                        core.table.sort(nodes, port_sort_nodes_cmp)

                    elseif sort_type == "host_sort" then
                        core.table.sort(nodes, host_sort_nodes_cmp)

                    elseif sort_type == "combine_sort" then
                        core.table.sort(nodes, combine_sort_nodes_cmp)

                    end
                end
                up_services[service_name] = nodes
            end
            ::CONTINUE::
        end

        update_all_services(consul_server.consul_server_url, up_services)

        -- update events
        local post_ok, post_err = events:post(events_list._source,
            events_list.updating, all_services)
        if not post_ok then
            log.error("post_event failure with ", events_list._source,
                ", update all services error: ", post_err)
        end

        if dump_params then
            ngx_timer_at(0, write_dump_services)
        end

        update_index(consul_server,
            catalog_res.headers['X-Consul-Index'],
            health_res.headers['X-Consul-Index'])
    end

    check_keepalive(consul_server, retry_delay)
end


local function format_consul_params(consul_conf)
    local consul_server_list = core.table.new(0, #consul_conf.servers)

    for _, v in pairs(consul_conf.servers) do
        local scheme, host, port, path = unpack(http.parse_uri(nil, v))
        if scheme ~= "http" then
            return nil, "only support consul http schema address, eg: http://address:port"
        elseif path ~= "/" or core.string.has_suffix(v, '/') then
            return nil, "invalid consul server address, the valid format: http://address:port"
        end
        core.table.insert(consul_server_list, {
            host = host,
            port = port,
            token = consul_conf.token,
            connect_timeout = consul_conf.timeout.connect,
            read_timeout = consul_conf.timeout.read,
            wait_timeout = consul_conf.timeout.wait,
            consul_watch_catalog_url = "/catalog/services",
            consul_sub_url = "/health/service",
            consul_watch_health_url = "/health/state/any",
            consul_server_url = v .. "/v1",
            weight = consul_conf.weight,
            keepalive = consul_conf.keepalive,
            health_index = 0,
            catalog_index = 0,
            fetch_interval = consul_conf.fetch_interval -- fetch interval until the next connect to consul
        })
    end
    return consul_server_list, nil
end


function _M.init_worker()
    local consul_conf = local_conf.discovery.consul

    if consul_conf.dump then
        local dump = consul_conf.dump
        dump_params = dump

        if dump.load_on_init then
            read_dump_services()
        end
    end

    events = require("apisix.events")
    events_list = events:event_list(
        "discovery_consul_update_all_services",
        "updating"
    )

    if 0 ~= ngx_worker_id() then
        events:register(discovery_consul_callback, events_list._source, events_list.updating)
        return
    end

    log.notice("consul_conf: ", json_delay_encode(consul_conf, true))
    default_weight = consul_conf.weight
    sort_type = consul_conf.sort_type
    -- set the default service, used when the server node cannot be found
    if consul_conf.default_service then
        default_service = consul_conf.default_service
        default_service.weight = default_weight
    end
    if consul_conf.skip_services then
        skip_service_map = core.table.new(0, #consul_conf.skip_services)
        for _, v in ipairs(consul_conf.skip_services) do
            skip_service_map[v] = true
        end
    end
    -- set up the default skip services
    for _, v in ipairs(default_skip_services) do
        skip_service_map[v] = true
    end

    local consul_servers_list, err = format_consul_params(consul_conf)
    if err then
        error("format consul config got error: " .. err)
    end
    log.info("consul_server_list: ", json_delay_encode(consul_servers_list, true))

    consul_services = core.table.new(0, 1)
    -- success or failure
    for _, server in ipairs(consul_servers_list) do
        local ok, err = ngx_timer_at(0, _M.connect, server)
        if not ok then
            error("create consul got error: " .. err)
        end

        if server.keepalive == false then
            ngx_timer_every(server.fetch_interval, _M.connect, server)
        end
    end
end


function _M.dump_data()
    return {config = local_conf.discovery.consul, services = all_services}
end


function _M.control_api()
    return {
        {
            methods = {"GET"},
            uris = {"/show_dump_file"},
            handler = show_dump_file,
        }
    }
end


return _M
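
-- Illustrative note (not part of the upstream file): show_dump_file is wired
-- into APISIX's control API by _M.control_api() above. With the default
-- control listener this should be reachable at something like (the exact
-- mount point is an assumption):
--
--     curl http://127.0.0.1:9090/v1/discovery/consul/show_dump_file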
@@ -0,0 +1,92 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
    type = "object",
    properties = {
        servers = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            }
        },
        token = {type = "string", default = ""},
        fetch_interval = {type = "integer", minimum = 1, default = 3},
        keepalive = {
            type = "boolean",
            default = true
        },
        weight = {type = "integer", minimum = 1, default = 1},
        timeout = {
            type = "object",
            properties = {
                connect = {type = "integer", minimum = 1, default = 2000},
                read = {type = "integer", minimum = 1, default = 2000},
                wait = {type = "integer", minimum = 1, default = 60}
            },
            default = {
                connect = 2000,
                read = 2000,
                wait = 60,
            }
        },
        sort_type = {
            type = "string",
            enum = {"origin", "host_sort", "port_sort", "combine_sort"},
            default = "origin",
        },
        skip_services = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            }
        },
        dump = {
            type = "object",
            properties = {
                path = {type = "string", minLength = 1},
                load_on_init = {type = "boolean", default = true},
                expire = {type = "integer", default = 0},
            },
            required = {"path"},
        },
        default_service = {
            type = "object",
            properties = {
                host = {type = "string"},
                port = {type = "integer"},
                metadata = {
                    type = "object",
                    properties = {
                        fail_timeout = {type = "integer", default = 1},
                        weight = {type = "integer", default = 1},
                        max_fails = {type = "integer", default = 1}
                    },
                    default = {
                        fail_timeout = 1,
                        weight = 1,
                        max_fails = 1
                    }
                }
            }
        }
    },

    required = {"servers"}
}
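
-- Illustrative example (not part of the upstream file): a config.yaml snippet
-- this schema would accept; the server address and dump path are placeholders.
--
--     discovery:
--       consul:
--         servers:
--           - "http://127.0.0.1:8500"
--         fetch_interval: 3
--         keepalive: true
--         weight: 1
--         skip_services:
--           - "service_a"
--         dump:
--           path: "logs/consul.dump"
--           load_on_init: false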
@@ -0,0 +1,439 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local core_sleep = require("apisix.core.utils").sleep
local resty_consul = require('resty.consul')
local cjson = require('cjson')
local http = require('resty.http')
local util = require("apisix.cli.util")
local ipairs = ipairs
local error = error
local ngx = ngx
local unpack = unpack
local ngx_re_match = ngx.re.match
local tonumber = tonumber
local pairs = pairs
local ipairs = ipairs
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local log = core.log
local ngx_decode_base64 = ngx.decode_base64
local json_delay_encode = core.json.delay_encode
local cjson_null = cjson.null

local applications = core.table.new(0, 5)
local default_service
local default_weight
local default_prefix_rule
local skip_keys_map = core.table.new(0, 1)
local dump_params

local events
local events_list
local consul_apps

local _M = {
    version = 0.3,
}


local function discovery_consul_callback(data, event, source, pid)
    applications = data
    log.notice("update local variable application, event is: ", event,
               "source: ", source, "server pid:", pid,
               ", application: ", core.json.encode(applications, true))
end


function _M.all_nodes()
    return applications
end


function _M.nodes(service_name)
    if not applications then
        log.error("application is nil, failed to fetch nodes for : ", service_name)
        return
    end

    local resp_list = applications[service_name]

    if not resp_list then
        log.error("fetch nodes failed by ", service_name, ", return default service")
        return default_service and {default_service}
    end

    log.info("process id: ", ngx.worker.id(), ", applications[", service_name, "] = ",
             json_delay_encode(resp_list, true))

    return resp_list
end


local function parse_instance(node, server_name_prefix)
    local key = node.Key

    if key == cjson_null or not key or #key == 0 then
        log.error("consul_key_empty, server_name_prefix: ", server_name_prefix,
                  ", node: ", json_delay_encode(node, true))
        return false
    end

    local result = ngx_re_match(key, default_prefix_rule, "jo")
    if not result then
        log.error("server name parse error, server_name_prefix: ", server_name_prefix,
                  ", node: ", json_delay_encode(node, true))
        return false
    end

    local sn, host, port = result[1], result[2], result[3]

    -- if present, skip special keys
    if sn and skip_keys_map[sn] then
        return false
    end

    -- base64 value = "IHsid2VpZ2h0IjogMTIwLCAibWF4X2ZhaWxzIjogMiwgImZhaWxfdGltZW91dCI6IDJ9"
    -- original value = "{"weight": 120, "max_fails": 2, "fail_timeout": 2}"
    local metadataBase64 = node.Value
    if metadataBase64 == cjson_null or not metadataBase64 or #metadataBase64 == 0 then
        log.error("error: consul_value_empty, server_name_prefix: ", server_name_prefix,
                  ", node: ", json_delay_encode(node, true))
        return false
    end

    local metadata, err = core.json.decode(ngx_decode_base64(metadataBase64))
    if err then
        log.error("invalid upstream value, server_name_prefix: ", server_name_prefix,
                  ", err: ", err, ", node: ", json_delay_encode(node, true))
        return false
    elseif metadata.check_status == false or metadata.check_status == "false" then
        log.error("server node unhealthy, server_name_prefix: ", server_name_prefix,
                  ", node: ", json_delay_encode(node, true))
        return false
    end

    return true, host, tonumber(port), metadata, sn
end
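
-- Illustrative note (not part of the upstream file): parse_instance() expects
-- KV keys laid out as <prefix>/<service_name>/<host>:<port> with a
-- base64-encoded JSON metadata value. Assuming the default prefix "upstreams",
-- a hypothetical registration looks like:
--
--     consul kv put "upstreams/webpages/172.19.5.12:8000" \
--         '{"weight": 120, "max_fails": 2, "fail_timeout": 2}'
--
-- The Consul HTTP API returns the stored value base64-encoded, which is why
-- the code calls ngx_decode_base64 before core.json.decode.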


local function update_application(server_name_prefix, data)
    local sn
    local up_apps = core.table.new(0, #data)
    local weight = default_weight

    for _, node in ipairs(data) do
        local succ, ip, port, metadata, server_name = parse_instance(node, server_name_prefix)
        if succ then
            sn = server_name_prefix .. server_name
            local nodes = up_apps[sn]
            if not nodes then
                nodes = core.table.new(1, 0)
                up_apps[sn] = nodes
            end
            core.table.insert(nodes, {
                host = ip,
                port = port,
                weight = metadata and metadata.weight or weight,
            })
        end
    end

    -- clean old unused data
    local old_apps = consul_apps[server_name_prefix] or {}
    for k, _ in pairs(old_apps) do
        applications[k] = nil
    end
    core.table.clear(old_apps)

    for k, v in pairs(up_apps) do
        applications[k] = v
    end
    consul_apps[server_name_prefix] = up_apps

    log.info("update applications: ", core.json.encode(applications))
end


local function read_dump_srvs()
    local data, err = util.read_file(dump_params.path)
    if not data then
        log.notice("read dump file get error: ", err)
        return
    end

    log.info("read dump file: ", data)
    data = util.trim(data)
    if #data == 0 then
        log.error("dump file is empty")
        return
    end

    local entity, err = core.json.decode(data)
    if not entity then
        log.error("decoded dump data got error: ", err, ", file content: ", data)
        return
    end

    if not entity.services or not entity.last_update then
        log.warn("decoded dump data miss fields, file content: ", data)
        return
    end

    local now_time = ngx.time()
    log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ",
             dump_params.expire, ", now_time: ", now_time)
    if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
        log.warn("dump file: ", dump_params.path, " had expired, ignored it")
        return
    end

    applications = entity.services
    log.info("load dump file into memory success")
end


local function write_dump_srvs()
    local entity = {
        services = applications,
        last_update = ngx.time(),
        expire = dump_params.expire, -- later need handle it
    }
    local data = core.json.encode(entity)
    local succ, err = util.write_file(dump_params.path, data)
    if not succ then
        log.error("write dump into file got error: ", err)
    end
end


local function show_dump_file()
    if not dump_params then
        return 503, "dump params is nil"
    end

    local data, err = util.read_file(dump_params.path)
    if not data then
        return 503, err
    end

    return 200, data
end


function _M.connect(premature, consul_server, retry_delay)
    if premature then
        return
    end

    local consul_client = resty_consul:new({
        host = consul_server.host,
        port = consul_server.port,
        connect_timeout = consul_server.connect_timeout,
        read_timeout = consul_server.read_timeout,
        default_args = consul_server.default_args,
    })

    log.info("consul_server: ", json_delay_encode(consul_server, true))
    local result, err = consul_client:get(consul_server.consul_key)
    local error_info = (err ~= nil and err)
                       or ((result ~= nil and result.status ~= 200)
                       and result.status)
    if error_info then
        log.error("connect consul: ", consul_server.server_name_key,
            " by key: ", consul_server.consul_key,
            ", got result: ", json_delay_encode(result, true),
            ", with error: ", error_info)

        if not retry_delay then
            retry_delay = 1
        else
            retry_delay = retry_delay * 4
        end

        log.warn("retry connecting consul after ", retry_delay, " seconds")
        core_sleep(retry_delay)

        goto ERR
    end

    log.info("connect consul: ", consul_server.server_name_key,
        ", result status: ", result.status,
        ", result.headers.index: ", result.headers['X-Consul-Index'],
        ", result body: ", json_delay_encode(result.body))

    -- if the current index differs from the last index, update the application
    if consul_server.index ~= result.headers['X-Consul-Index'] then
        consul_server.index = result.headers['X-Consul-Index']
        -- only the long-poll (keepalive) connection type uses the index
        if consul_server.keepalive then
            consul_server.default_args.index = result.headers['X-Consul-Index']
        end

        -- decode body, decode json, update application, error handling
        if result.body and #result.body ~= 0 then
            log.notice("server_name: ", consul_server.server_name_key,
                ", header: ", core.json.encode(result.headers, true),
                ", body: ", core.json.encode(result.body, true))

            update_application(consul_server.server_name_key, result.body)
            -- update events
            local ok, err = events:post(events_list._source, events_list.updating, applications)
            if not ok then
                log.error("post_event failure with ", events_list._source,
                    ", update application error: ", err)
            end

            if dump_params then
                ngx_timer_at(0, write_dump_srvs)
            end
        end
    end

    ::ERR::
    local keepalive = consul_server.keepalive
    if keepalive then
        local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
        if not ok then
            log.error("create ngx_timer_at got error: ", err)
            return
        end
    end
end


local function format_consul_params(consul_conf)
    local consul_server_list = core.table.new(0, #consul_conf.servers)
    local args = {
        token = consul_conf.token,
        recurse = true
    }

    if consul_conf.keepalive then
        args.wait = consul_conf.timeout.wait -- blocked wait != 0; unblocked by wait = 0
        args.index = 0
    end

    for _, v in pairs(consul_conf.servers) do
        local scheme, host, port, path = unpack(http.parse_uri(nil, v))
        if scheme ~= "http" then
            return nil, "only support consul http schema address, eg: http://address:port"
        elseif path ~= "/" or core.string.has_suffix(v, '/') then
            return nil, "invalid consul server address, the valid format: http://address:port"
        end

        core.table.insert(consul_server_list, {
            host = host,
            port = port,
            connect_timeout = consul_conf.timeout.connect,
            read_timeout = consul_conf.timeout.read,
            consul_key = "/kv/" .. consul_conf.prefix,
            server_name_key = v .. "/v1/kv/",
            weight = consul_conf.weight,
            keepalive = consul_conf.keepalive,
            default_args = args,
            index = 0,
            fetch_interval = consul_conf.fetch_interval -- fetch interval until the next connect to consul
        })
    end

    return consul_server_list
end


function _M.init_worker()
    local consul_conf = local_conf.discovery.consul_kv

    if consul_conf.dump then
        local dump = consul_conf.dump
        dump_params = dump

        if dump.load_on_init then
            read_dump_srvs()
        end
    end

    events = require("apisix.events")
    events_list = events:event_list(
        "discovery_consul_update_application",
        "updating"
    )

    if 0 ~= ngx.worker.id() then
        events:register(discovery_consul_callback, events_list._source, events_list.updating)
        return
    end

    log.notice("consul_conf: ", core.json.encode(consul_conf))
    default_weight = consul_conf.weight
    -- set the default service, used when the server node cannot be found
    if consul_conf.default_service then
        default_service = consul_conf.default_service
        default_service.weight = default_weight
    end
    default_prefix_rule = "(" .. consul_conf.prefix .. "/.*/)([a-zA-Z0-9.]+):([0-9]+)"
    log.info("default params, default_weight: ", default_weight,
             ", default_prefix_rule: ", default_prefix_rule)
    if consul_conf.skip_keys then
        skip_keys_map = core.table.new(0, #consul_conf.skip_keys)
        for _, v in ipairs(consul_conf.skip_keys) do
            skip_keys_map[v] = true
        end
    end

    local consul_servers_list, err = format_consul_params(consul_conf)
    if err then
        error(err)
        return
    end
    log.info("consul_server_list: ", core.json.encode(consul_servers_list))

    consul_apps = core.table.new(0, 1)
    -- success or failure
    for _, server in ipairs(consul_servers_list) do
        local ok, err = ngx_timer_at(0, _M.connect, server)
        if not ok then
            error("create consul_kv got error: " .. err)
            return
        end

        if server.keepalive == false then
            ngx_timer_every(server.fetch_interval, _M.connect, server)
        end
    end
end


function _M.dump_data()
    return {config = local_conf.discovery.consul_kv, services = applications}
end


function _M.control_api()
    return {
        {
            methods = {"GET"},
            uris = {"/show_dump_file"},
            handler = show_dump_file,
        }
    }
end


return _M
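
-- Illustrative note (not part of the upstream file): the module runs in one of
-- two modes. With keepalive = true (the default), _M.connect() re-arms itself
-- via ngx.timer.at(0, ...) and relies on the saved X-Consul-Index to
-- long-poll; with keepalive = false, init_worker() instead schedules a plain
-- poll every fetch_interval seconds via ngx.timer.every.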
@@ -0,0 +1,88 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
    type = "object",
    properties = {
        servers = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            }
        },
        token = {type = "string", default = ""},
        fetch_interval = {type = "integer", minimum = 1, default = 3},
        keepalive = {
            type = "boolean",
            default = true
        },
        prefix = {type = "string", default = "upstreams"},
        weight = {type = "integer", minimum = 1, default = 1},
        timeout = {
            type = "object",
            properties = {
                connect = {type = "integer", minimum = 1, default = 2000},
                read = {type = "integer", minimum = 1, default = 2000},
                wait = {type = "integer", minimum = 1, default = 60}
            },
            default = {
                connect = 2000,
                read = 2000,
                wait = 60,
            }
        },
        skip_keys = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            }
        },
        dump = {
            type = "object",
            properties = {
                path = {type = "string", minLength = 1},
                load_on_init = {type = "boolean", default = true},
                expire = {type = "integer", default = 0},
            },
            required = {"path"},
        },
        default_service = {
            type = "object",
            properties = {
                host = {type = "string"},
                port = {type = "integer"},
                metadata = {
                    type = "object",
                    properties = {
                        fail_timeout = {type = "integer", default = 1},
                        weight = {type = "integer", default = 1},
                        max_fails = {type = "integer", default = 1}
                    },
                    default = {
                        fail_timeout = 1,
                        weight = 1,
                        max_fails = 1
                    }
                }
            }
        }
    },

    required = {"servers"}
}
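
-- Illustrative example (not part of the upstream file): a config.yaml snippet
-- this schema would accept; the server address and keys are placeholders.
--
--     discovery:
--       consul_kv:
--         servers:
--           - "http://127.0.0.1:8500"
--         prefix: "upstreams"
--         skip_keys:
--           - "upstreams/unused_api/"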
@@ -0,0 +1,89 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local core = require("apisix.core")
local config_local = require("apisix.core.config_local")
local is_http = ngx.config.subsystem == "http"
local ipairs = ipairs
local error = error


local dns_client
local _M = {}


function _M.nodes(service_name)
    local host, port = core.utils.parse_addr(service_name)
    core.log.info("discovery dns with host ", host, ", port ", port)

    local records, err = dns_client:resolve(host, core.dns_client.RETURN_ALL)
    if not records then
        return nil, err
    end

    local nodes = core.table.new(#records, 0)
    local index = 1
    for _, r in ipairs(records) do
        if r.address then
            local node_port = port
            if not node_port and r.port ~= 0 then
                -- no port in service_name: fall back to the record's port,
                -- unless that port is zero
                node_port = r.port
            end

            -- ignore zero port when subsystem is stream
            if node_port or is_http then
                nodes[index] = {host = r.address, weight = r.weight or 1, port = node_port}
                if r.priority then
                    -- for SRV records, nodes with lower priority are chosen first
                    nodes[index].priority = -r.priority
                end
                index = index + 1
            end
        end
    end

    return nodes
end


function _M.init_worker()
    local local_conf = config_local.local_conf()
    local servers = local_conf.discovery.dns.servers
    local resolv_conf = local_conf.discovery.dns.resolv_conf
    local default_order = {"last", "SRV", "A", "AAAA", "CNAME"}
    local order = core.table.try_read_attr(local_conf, "discovery", "dns", "order")
    order = order or default_order

    local opts = {
        hosts = {},
        resolvConf = resolv_conf,
        nameservers = servers,
        order = order,
    }

    local client, err = core.dns_client.new(opts)
    if not client then
        -- error() takes a single message string; concatenate rather than pass
        -- err as a second argument (which error() would treat as a level)
        error("failed to init the dns client: " .. err)
        return
    end

    dns_client = client
end


return _M
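
-- Illustrative note (not part of the upstream file): with this module an
-- upstream can skip static nodes and resolve them at request time. Assuming a
-- routable name, a hypothetical route's upstream would set:
--
--     upstream:
--       service_name: "httpbin.default.svc.cluster.local:8080"
--       discovery_type: dns
--       type: roundrobin
--
-- _M.nodes() splits the host:port with core.utils.parse_addr(), resolves the
-- host, and returns one node per A/AAAA/SRV answer.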
@@ -0,0 +1,48 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
    type = "object",
    properties = {
        servers = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            },
        },
        resolv_conf = {
            type = "string",
        },
        order = {
            type = "array",
            minItems = 1,
            maxItems = 5,
            uniqueItems = true,
            items = {
                enum = {"last", "SRV", "A", "AAAA", "CNAME"}
            },
        },
    },
    oneOf = {
        {
            required = {"servers"},
        },
        {
            required = {"resolv_conf"},
        }
    }
}
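
-- Illustrative example (not part of the upstream file): per the oneOf clause,
-- a valid config names either explicit nameservers or a resolv.conf path.
-- Addresses below are placeholders.
--
--     discovery:
--       dns:
--         servers:
--           - "127.0.0.1:8600"      # e.g. the DNS port of a local Consul agent
--         order:
--           - last
--           - SRV
--           - A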
@@ -0,0 +1,223 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local local_conf = require("apisix.core.config_local").local_conf()
local http = require("resty.http")
local core = require("apisix.core")
local ipmatcher = require("resty.ipmatcher")
local ipairs = ipairs
local tostring = tostring
local type = type
local math_random = math.random
local ngx = ngx
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local string_sub = string.sub
local str_find = core.string.find
local log = core.log

local default_weight
local applications


local _M = {
    version = 0.1,
}


local function service_info()
    local host = local_conf.discovery and
        local_conf.discovery.eureka and local_conf.discovery.eureka.host
    if not host then
        log.error("do not set eureka.host")
        return
    end

    local basic_auth
    -- TODO Add health check to get healthy nodes.
    local url = host[math_random(#host)]
    local auth_idx = str_find(url, "@")
    if auth_idx then
        local protocol_idx = str_find(url, "://")
        local protocol = string_sub(url, 1, protocol_idx + 2)
        local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1)
        local other = string_sub(url, auth_idx + 1)
        url = protocol .. other
        basic_auth = "Basic " .. ngx.encode_base64(user_and_password)
    end
    if local_conf.discovery.eureka.prefix then
        url = url .. local_conf.discovery.eureka.prefix
    end
    if string_sub(url, #url) ~= "/" then
        url = url .. "/"
    end

    return url, basic_auth
end


local function request(request_uri, basic_auth, method, path, query, body)
    log.info("eureka uri:", request_uri, ".")
    local url = request_uri .. path
    local headers = core.table.new(0, 5)
    headers['Connection'] = 'Keep-Alive'
    headers['Accept'] = 'application/json'

    if basic_auth then
        headers['Authorization'] = basic_auth
    end

    if body and 'table' == type(body) then
        local err
        body, err = core.json.encode(body)
        if not body then
            return nil, 'invalid body : ' .. err
        end
        -- log.warn(method, url, body)
        headers['Content-Type'] = 'application/json'
    end

    local httpc = http.new()
    local timeout = local_conf.discovery.eureka.timeout
    local connect_timeout = timeout and timeout.connect or 2000
    local send_timeout = timeout and timeout.send or 2000
    local read_timeout = timeout and timeout.read or 5000
    log.info("connect_timeout:", connect_timeout, ", send_timeout:", send_timeout,
             ", read_timeout:", read_timeout, ".")
    httpc:set_timeouts(connect_timeout, send_timeout, read_timeout)
    return httpc:request_uri(url, {
        version = 1.1,
        method = method,
        headers = headers,
        query = query,
        body = body,
        ssl_verify = false,
    })
end


local function parse_instance(instance)
    local status = instance.status
    local overridden_status = instance.overriddenstatus or instance.overriddenStatus
    if overridden_status and overridden_status ~= "UNKNOWN" then
        status = overridden_status
    end

    if status ~= "UP" then
        return
    end
    local port
    if tostring(instance.port["@enabled"]) == "true" and instance.port["$"] then
        port = instance.port["$"]
        -- secure = false
    end
    if tostring(instance.securePort["@enabled"]) == "true" and instance.securePort["$"] then
        port = instance.securePort["$"]
        -- secure = true
    end
    local ip = instance.ipAddr
    if not ipmatcher.parse_ipv4(ip) and
        not ipmatcher.parse_ipv6(ip) then
        log.error(instance.app, " service ", instance.hostName, " node IP ", ip,
                  " is invalid(must be IPv4 or IPv6).")
        return
    end
    return ip, port, instance.metadata
end
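
-- Illustrative note (not part of the upstream file): parse_instance() walks
-- the JSON that Eureka's /apps endpoint returns, where port values are wrapped
-- objects. A trimmed, hypothetical instance looks like:
--
--     {
--       "app": "MY-SERVICE",
--       "ipAddr": "172.16.1.7",
--       "status": "UP",
--       "overriddenStatus": "UNKNOWN",
--       "port": {"$": 8080, "@enabled": "true"},
--       "securePort": {"$": 8443, "@enabled": "false"},
--       "metadata": {"weight": 100}
--     }
--
-- Only instances whose effective status is "UP" and whose ipAddr parses as
-- IPv4/IPv6 make it into the node list; securePort wins when both are enabled.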


local function fetch_full_registry(premature)
    if premature then
        return
    end

    local request_uri, basic_auth = service_info()
    if not request_uri then
        return
    end

    local res, err = request(request_uri, basic_auth, "GET", "apps")
    if not res then
        log.error("failed to fetch registry", err)
        return
    end

    if not res.body or res.status ~= 200 then
        log.error("failed to fetch registry, status = ", res.status)
        return
    end

    local json_str = res.body
    local data, err = core.json.decode(json_str)
    if not data then
        log.error("invalid response body: ", json_str, " err: ", err)
        return
    end
    local apps = data.applications.application
    local up_apps = core.table.new(0, #apps)
    for _, app in ipairs(apps) do
        for _, instance in ipairs(app.instance) do
            local ip, port, metadata = parse_instance(instance)
            if ip and port then
                local nodes = up_apps[app.name]
                if not nodes then
                    nodes = core.table.new(#app.instance, 0)
                    up_apps[app.name] = nodes
                end
                core.table.insert(nodes, {
                    host = ip,
                    port = port,
                    weight = metadata and metadata.weight or default_weight,
                    metadata = metadata,
                })
                if metadata then
                    -- remove useless data
                    metadata.weight = nil
                end
            end
        end
    end
    applications = up_apps
end


function _M.nodes(service_name)
    if not applications then
        log.error("failed to fetch nodes for : ", service_name)
        return
    end

    return applications[service_name]
end


function _M.init_worker()
    default_weight = local_conf.discovery.eureka.weight or 100
    log.info("default_weight:", default_weight, ".")
    local fetch_interval = local_conf.discovery.eureka.fetch_interval or 30
    log.info("fetch_interval:", fetch_interval, ".")
    ngx_timer_at(0, fetch_full_registry)
    ngx_timer_every(fetch_interval, fetch_full_registry)
end


function _M.dump_data()
    return {config = local_conf.discovery.eureka, services = applications or {}}
end


return _M
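
-- Illustrative note (not part of the upstream file): Eureka typically registers
-- applications under upper-cased names, so a route's upstream would reference
-- something like (the name is a placeholder):
--
--     upstream:
--       service_name: "MY-SERVICE"
--       discovery_type: eureka
--       type: roundrobin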
@@ -0,0 +1,40 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
return {
    type = "object",
    properties = {
        host = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
            },
        },
        fetch_interval = {type = "integer", minimum = 1, default = 30},
        prefix = {type = "string"},
        weight = {type = "integer", minimum = 0},
        timeout = {
            type = "object",
            properties = {
                connect = {type = "integer", minimum = 1, default = 2000},
                send = {type = "integer", minimum = 1, default = 2000},
                read = {type = "integer", minimum = 1, default = 5000},
            }
        },
    },
    required = {"host"}
}
@@ -0,0 +1,43 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local log = require("apisix.core.log")
local local_conf = require("apisix.core.config_local").local_conf()
local pairs = pairs

local discovery_type = local_conf.discovery
local discovery = {}

if discovery_type then
    for discovery_name, _ in pairs(discovery_type) do
        log.info("use discovery: ", discovery_name)
        discovery[discovery_name] = require("apisix.discovery." .. discovery_name)
    end
end


function discovery.init_worker()
    if discovery_type then
        for discovery_name, _ in pairs(discovery_type) do
            discovery[discovery_name].init_worker()
        end
    end
end


return {
    version = 0.1,
    discovery = discovery
}
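
-- Loading convention (derived from the loop above): every key under
-- `discovery` in config.yaml maps 1:1 to a module under apisix/discovery/.
-- For example, a config declaring both `discovery.eureka` and
-- `discovery.nacos` makes this loader require("apisix.discovery.eureka")
-- and require("apisix.discovery.nacos"), and init_worker() then fans out
-- to each module's own init_worker().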
@@ -0,0 +1,377 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local ngx = ngx
local ipairs = ipairs
local string = string
local math = math
local type = type
local core = require("apisix.core")
local http = require("resty.http")

local function list_query(informer)
    local arguments = {
        limit = informer.limit,
    }

    if informer.continue and informer.continue ~= "" then
        arguments.continue = informer.continue
    end

    if informer.label_selector and informer.label_selector ~= "" then
        arguments.labelSelector = informer.label_selector
    end

    if informer.field_selector and informer.field_selector ~= "" then
        arguments.fieldSelector = informer.field_selector
    end

    return ngx.encode_args(arguments)
end


local function list(httpc, apiserver, informer)
    local response, err = httpc:request({
        path = informer.path,
        query = list_query(informer),
        headers = {
            ["Host"] = apiserver.host .. ":" .. apiserver.port,
            ["Authorization"] = "Bearer " .. apiserver.token,
            ["Accept"] = "application/json",
            ["Connection"] = "keep-alive"
        }
    })

    core.log.info("--raw=", informer.path, "?", list_query(informer))

    if not response then
        return false, "RequestError", err or ""
    end

    if response.status ~= 200 then
        return false, response.reason, response:read_body() or ""
    end

    local body, err = response:read_body()
    if err then
        return false, "ReadBodyError", err
    end

    local data = core.json.decode(body)
    if not data or data.kind ~= informer.list_kind then
        return false, "UnexpectedBody", body
    end

    informer.version = data.metadata.resourceVersion

    if informer.on_added then
        for _, item in ipairs(data.items or {}) do
            informer:on_added(item, "list")
        end
    end

    informer.continue = data.metadata.continue
    if informer.continue and informer.continue ~= "" then
        list(httpc, apiserver, informer)
    end

    return true
end


local function watch_query(informer)
    local arguments = {
        watch = "true",
        allowWatchBookmarks = "true",
        timeoutSeconds = informer.overtime,
    }

    if informer.version and informer.version ~= "" then
        arguments.resourceVersion = informer.version
    end

    if informer.label_selector and informer.label_selector ~= "" then
        arguments.labelSelector = informer.label_selector
    end

    if informer.field_selector and informer.field_selector ~= "" then
        arguments.fieldSelector = informer.field_selector
    end

    return ngx.encode_args(arguments)
end


local function split_event(body, callback, ...)
    local gmatch_iterator, err = ngx.re.gmatch(body, "{\"type\":.*}\n", "jao")
    if not gmatch_iterator then
        return false, nil, "GmatchError", err
    end

    local captures
    local captured_size = 0
    local ok, reason
    while true do
        captures, err = gmatch_iterator()

        if err then
            return false, nil, "GmatchError", err
        end

        if not captures then
            break
        end

        captured_size = captured_size + #captures[0]

        ok, reason, err = callback(captures[0], ...)
        if not ok then
            return false, nil, reason, err
        end
    end

    local remainder_body
    if captured_size == #body then
        remainder_body = ""
    elseif captured_size == 0 then
        remainder_body = body
    elseif captured_size < #body then
        remainder_body = string.sub(body, captured_size + 1)
    end

    return true, remainder_body
end


local function dispatch_event(event_string, informer)
    local event = core.json.decode(event_string)

    if not event or not event.type or not event.object then
        return false, "UnexpectedBody", event_string
    end

    local tp = event.type

    if tp == "ERROR" then
        if event.object.code == 410 then
            return false, "ResourceGone", nil
        end
        return false, "UnexpectedBody", event_string
    end

    local object = event.object
    informer.version = object.metadata.resourceVersion

    if tp == "ADDED" then
        if informer.on_added then
            informer:on_added(object, "watch")
        end
    elseif tp == "DELETED" then
        if informer.on_deleted then
            informer:on_deleted(object)
        end
    elseif tp == "MODIFIED" then
        if informer.on_modified then
            informer:on_modified(object)
        end
    -- elseif tp == "BOOKMARK" then
    --     do nothing
    end

    return true
end


local function watch(httpc, apiserver, informer)
    local watch_times = 8
    for _ = 1, watch_times do
        local watch_seconds = 1800 + math.random(9, 999)
        informer.overtime = watch_seconds
        local http_seconds = watch_seconds + 120
        httpc:set_timeouts(2000, 3000, http_seconds * 1000)

        local response, err = httpc:request({
            path = informer.path,
            query = watch_query(informer),
            headers = {
                ["Host"] = apiserver.host .. ":" .. apiserver.port,
                ["Authorization"] = "Bearer " .. apiserver.token,
                ["Accept"] = "application/json",
                ["Connection"] = "keep-alive"
            }
        })

        core.log.info("--raw=", informer.path, "?", watch_query(informer))

        if err then
            return false, "RequestError", err
        end

        if response.status ~= 200 then
            return false, response.reason, response:read_body() or ""
        end

        local ok
        local remainder_body
        local body
        local reason

        while true do
            body, err = response.body_reader()
            if err then
                return false, "ReadBodyError", err
            end

            if not body then
                break
            end

            if remainder_body and #remainder_body > 0 then
                body = remainder_body .. body
            end

            ok, remainder_body, reason, err = split_event(body, dispatch_event, informer)
            if not ok then
                if reason == "ResourceGone" then
                    return true
                end
                return false, reason, err
            end
        end
    end

    return true
end


local function list_watch(informer, apiserver)
    local ok
    local reason, message
    local httpc = http.new()

    informer.continue = ""
    informer.version = ""

    informer.fetch_state = "connecting"
    core.log.info("begin to connect ", apiserver.host, ":", apiserver.port)

    ok, message = httpc:connect({
        scheme = apiserver.schema,
        host = apiserver.host,
        port = apiserver.port,
        ssl_verify = false
    })

    if not ok then
        informer.fetch_state = "connect failed"
        core.log.error("connect apiserver failed, apiserver.host: ", apiserver.host,
                       ", apiserver.port: ", apiserver.port, ", message: ", message)
        return false
    end

    core.log.info("begin to list ", informer.kind)
    informer.fetch_state = "listing"
    if informer.pre_list then
        informer:pre_list()
    end

    ok, reason, message = list(httpc, apiserver, informer)
    if not ok then
        informer.fetch_state = "list failed"
        core.log.error("list failed, kind: ", informer.kind,
                       ", reason: ", reason, ", message: ", message)
        return false
    end

    informer.fetch_state = "list finished"
    if informer.post_list then
        informer:post_list()
    end

    core.log.info("begin to watch ", informer.kind)
    informer.fetch_state = "watching"
    ok, reason, message = watch(httpc, apiserver, informer)
    if not ok then
        informer.fetch_state = "watch failed"
        core.log.error("watch failed, kind: ", informer.kind,
                       ", reason: ", reason, ", message: ", message)
        return false
    end

    informer.fetch_state = "watch finished"

    return true
end


local _M = {
}


function _M.new(group, version, kind, plural, namespace)
    local tp
    tp = type(group)
    if tp ~= "nil" and tp ~= "string" then
        return nil, "group should be set to string or nil type but " .. tp
    end

    tp = type(namespace)
    if tp ~= "nil" and tp ~= "string" then
        return nil, "namespace should be set to string or nil type but " .. tp
    end

    tp = type(version)
    if tp ~= "string" or version == "" then
        return nil, "version should be set to non-empty string"
    end

    tp = type(kind)
    if tp ~= "string" or kind == "" then
        return nil, "kind should be set to non-empty string"
    end

    tp = type(plural)
    if tp ~= "string" or plural == "" then
        return nil, "plural should be set to non-empty string"
    end

    local path = ""
    if group == nil or group == "" then
        path = path .. "/api/" .. version
    else
        path = path .. "/apis/" .. group .. "/" .. version
    end

    if namespace and namespace ~= "" then
        path = path .. "/namespaces/" .. namespace
    end
    path = path .. "/" .. plural

    return {
        kind = kind,
        list_kind = kind .. "List",
        plural = plural,
        path = path,
        limit = 120,
        label_selector = "",
        field_selector = "",
        overtime = "1800",
        version = "",
        continue = "",
        list_watch = list_watch
    }
end


return _M
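
-- Usage sketch (mirrors how apisix/discovery/kubernetes/init.lua drives this
-- factory below; the handler body is a placeholder):
--
--   local informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "")
--   informer.on_added = function(self, object, drive)  -- drive is "list" or "watch"
--       -- index object here
--   end
--   local ok = informer:list_watch(apiserver)
--
-- list_watch() blocks inside a timer: one paginated LIST (continue tokens are
-- followed recursively), then up to 8 long-poll WATCH requests of roughly
-- 1800s each before it returns and the caller reschedules it.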
@@ -0,0 +1,694 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local ngx = ngx
local ipairs = ipairs
local pairs = pairs
local string = string
local tonumber = tonumber
local tostring = tostring
local os = os
local error = error
local pcall = pcall
local setmetatable = setmetatable
local is_http = ngx.config.subsystem == "http"
local process = require("ngx.process")
local core = require("apisix.core")
local util = require("apisix.cli.util")
local local_conf = require("apisix.core.config_local").local_conf()
local informer_factory = require("apisix.discovery.kubernetes.informer_factory")


local ctx

local endpoint_lrucache = core.lrucache.new({
    ttl = 300,
    count = 1024
})

local endpoint_buffer = {}

local function sort_nodes_cmp(left, right)
    if left.host ~= right.host then
        return left.host < right.host
    end

    return left.port < right.port
end

local function on_endpoint_slices_modified(handle, endpoint)
    if handle.namespace_selector and
            not handle:namespace_selector(endpoint.metadata.namespace) then
        return
    end

    core.log.debug(core.json.delay_encode(endpoint))
    core.table.clear(endpoint_buffer)

    local endpointslices = endpoint.endpoints
    for _, endpointslice in ipairs(endpointslices or {}) do
        if endpointslice.addresses then
            local addresses = endpointslice.addresses
            for _, port in ipairs(endpoint.ports or {}) do
                local port_name
                if port.name then
                    port_name = port.name
                elseif port.targetPort then
                    port_name = tostring(port.targetPort)
                else
                    port_name = tostring(port.port)
                end

                if endpointslice.conditions and endpointslice.conditions.ready then
                    local nodes = endpoint_buffer[port_name]
                    if nodes == nil then
                        nodes = core.table.new(0, #endpointslices * #addresses)
                        endpoint_buffer[port_name] = nodes
                    end

                    for _, address in ipairs(addresses) do
                        core.table.insert(nodes, {
                            host = address.ip,
                            port = port.port,
                            weight = handle.default_weight
                        })
                    end
                end
            end
        end
    end

    -- endpoint_buffer maps port_name to a flat node array, so a single pass
    -- is enough to sort each node list
    for _, nodes in pairs(endpoint_buffer) do
        core.table.sort(nodes, sort_nodes_cmp)
    end

    local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
    local endpoint_content = core.json.encode(endpoint_buffer, true)
    local endpoint_version = ngx.crc32_long(endpoint_content)

    local _, err
    _, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version)
    if err then
        core.log.error("set endpoint version into discovery DICT failed, ", err)
        return
    end
    _, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content)
    if err then
        core.log.error("set endpoint into discovery DICT failed, ", err)
        handle.endpoint_dict:delete(endpoint_key .. "#version")
    end
end

local function on_endpoint_modified(handle, endpoint)
    if handle.namespace_selector and
            not handle:namespace_selector(endpoint.metadata.namespace) then
        return
    end

    core.log.debug(core.json.delay_encode(endpoint))
    core.table.clear(endpoint_buffer)

    local subsets = endpoint.subsets
    for _, subset in ipairs(subsets or {}) do
        if subset.addresses then
            local addresses = subset.addresses
            for _, port in ipairs(subset.ports or {}) do
                local port_name
                if port.name then
                    port_name = port.name
                elseif port.targetPort then
                    port_name = tostring(port.targetPort)
                else
                    port_name = tostring(port.port)
                end

                local nodes = endpoint_buffer[port_name]
                if nodes == nil then
                    nodes = core.table.new(0, #subsets * #addresses)
                    endpoint_buffer[port_name] = nodes
                end

                for _, address in ipairs(subset.addresses) do
                    core.table.insert(nodes, {
                        host = address.ip,
                        port = port.port,
                        weight = handle.default_weight
                    })
                end
            end
        end
    end

    -- same structure as above: port_name -> flat node array
    for _, nodes in pairs(endpoint_buffer) do
        core.table.sort(nodes, sort_nodes_cmp)
    end

    local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
    local endpoint_content = core.json.encode(endpoint_buffer, true)
    local endpoint_version = ngx.crc32_long(endpoint_content)

    local _, err
    _, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version)
    if err then
        core.log.error("set endpoint version into discovery DICT failed, ", err)
        return
    end
    _, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content)
    if err then
        core.log.error("set endpoint into discovery DICT failed, ", err)
        handle.endpoint_dict:delete(endpoint_key .. "#version")
    end
end


local function on_endpoint_deleted(handle, endpoint)
    if handle.namespace_selector and
            not handle:namespace_selector(endpoint.metadata.namespace) then
        return
    end

    core.log.debug(core.json.delay_encode(endpoint))
    local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name
    handle.endpoint_dict:delete(endpoint_key .. "#version")
    handle.endpoint_dict:delete(endpoint_key)
end


local function pre_list(handle)
    handle.endpoint_dict:flush_all()
end


local function post_list(handle)
    handle.endpoint_dict:flush_expired()
end


local function setup_label_selector(conf, informer)
    informer.label_selector = conf.label_selector
end


local function setup_namespace_selector(conf, informer)
    local ns = conf.namespace_selector
    if ns == nil then
        informer.namespace_selector = nil
        return
    end

    if ns.equal then
        informer.field_selector = "metadata.namespace=" .. ns.equal
        informer.namespace_selector = nil
        return
    end

    if ns.not_equal then
        informer.field_selector = "metadata.namespace!=" .. ns.not_equal
        informer.namespace_selector = nil
        return
    end

    if ns.match then
        informer.namespace_selector = function(self, namespace)
            local match = conf.namespace_selector.match
            local m, err
            for _, v in ipairs(match) do
                m, err = ngx.re.match(namespace, v, "jo")
                if m and m[0] == namespace then
                    return true
                end
                if err then
                    core.log.error("ngx.re.match failed: ", err)
                end
            end
            return false
        end
        return
    end

    if ns.not_match then
        informer.namespace_selector = function(self, namespace)
            local not_match = conf.namespace_selector.not_match
            local m, err
            for _, v in ipairs(not_match) do
                m, err = ngx.re.match(namespace, v, "jo")
                if m and m[0] == namespace then
                    return false
                end
                if err then
                    return false
                end
            end
            return true
        end
        return
    end

    return
end


local function read_env(key)
    if #key > 3 then
        local first, second = string.byte(key, 1, 2)
        if first == string.byte('$') and second == string.byte('{') then
            local last = string.byte(key, #key)
            if last == string.byte('}') then
                local env = string.sub(key, 3, #key - 1)
                local value = os.getenv(env)
                if not value then
                    return nil, "not found environment variable " .. env
                end
                return value
            end
        end
    end
    return key
end


local function read_token(token_file)
    local token, err = util.read_file(token_file)
    if err then
        return nil, err
    end

    -- remove possible extra whitespace
    return util.trim(token)
end


local function get_apiserver(conf)
    local apiserver = {
        schema = "",
        host = "",
        port = "",
    }

    apiserver.schema = conf.service.schema
    if apiserver.schema ~= "http" and apiserver.schema ~= "https" then
        return nil, "service.schema should be set to one of [http,https] but " .. apiserver.schema
    end

    local err
    apiserver.host, err = read_env(conf.service.host)
    if err then
        return nil, err
    end

    if apiserver.host == "" then
        return nil, "service.host should be set to non-empty string"
    end

    local port
    port, err = read_env(conf.service.port)
    if err then
        return nil, err
    end

    apiserver.port = tonumber(port)
    if not apiserver.port or apiserver.port <= 0 or apiserver.port > 65535 then
        -- report the raw string: apiserver.port is nil when tonumber() fails
        return nil, "invalid port value: " .. port
    end

    if conf.client.token then
        local token, err = read_env(conf.client.token)
        if err then
            return nil, err
        end
        apiserver.token = util.trim(token)
    elseif conf.client.token_file and conf.client.token_file ~= "" then
        setmetatable(apiserver, {
            __index = function(_, key)
                if key ~= "token" then
                    return
                end

                local token_file, err = read_env(conf.client.token_file)
                if err then
                    core.log.error("failed to read token file path: ", err)
                    return
                end

                local token, err = read_token(token_file)
                if err then
                    core.log.error("failed to read token from file: ", err)
                    return
                end
                core.log.debug("re-read the token value")
                return token
            end
        })
    else
        return nil, "one of [client.token,client.token_file] should be set but none"
    end

    if apiserver.schema == "https" and apiserver.token == "" then
        return nil, "apiserver.token should be set to non-empty string when service.schema is https"
    end

    return apiserver
end
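
-- Note on the metatable branch above: when only client.token_file is set, the
-- token is re-read from disk on every access of `apiserver.token`, so rotated
-- ServiceAccount tokens (e.g. projected volume tokens) are picked up without
-- reloading APISIX.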

local function create_endpoint_lrucache(endpoint_dict, endpoint_key, endpoint_port)
    local endpoint_content = endpoint_dict:get_stale(endpoint_key)
    if not endpoint_content then
        core.log.error("get empty endpoint content from discovery DICT, this should not happen, ",
                       endpoint_key)
        return nil
    end

    local endpoint = core.json.decode(endpoint_content)
    if not endpoint then
        core.log.error("decode endpoint content failed, this should not happen, content: ",
                       endpoint_content)
        return nil
    end

    return endpoint[endpoint_port]
end


local _M = {
    version = "0.0.1"
}


local function start_fetch(handle)
    local timer_runner
    timer_runner = function(premature)
        if premature then
            return
        end

        local ok, status = pcall(handle.list_watch, handle, handle.apiserver)

        local retry_interval = 0
        if not ok then
            core.log.error("list_watch failed, kind: ", handle.kind,
                           ", reason: ", "RuntimeException", ", message: ", status)
            retry_interval = 40
        elseif not status then
            retry_interval = 40
        end

        ngx.timer.at(retry_interval, timer_runner)
    end
    ngx.timer.at(0, timer_runner)
end


local function get_endpoint_dict(id)
    local shm = "kubernetes"

    if id and #id > 0 then
        shm = shm .. "-" .. id
    end

    if not is_http then
        shm = shm .. "-stream"
    end

    return ngx.shared[shm]
end


local function single_mode_init(conf)
    local endpoint_dict = get_endpoint_dict()

    if not endpoint_dict then
        error("failed to get lua_shared_dict: ngx.shared.kubernetes, " ..
              "please check your APISIX version")
    end

    if process.type() ~= "privileged agent" then
        ctx = endpoint_dict
        return
    end

    local apiserver, err = get_apiserver(conf)
    if err then
        error(err)
        return
    end

    local default_weight = conf.default_weight
    local endpoints_informer, err
    if conf.watch_endpoint_slices then
        endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1",
                                                       "EndpointSlice", "endpointslices", "")
    else
        endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "")
    end
    if err then
        error(err)
        return
    end

    setup_namespace_selector(conf, endpoints_informer)
    setup_label_selector(conf, endpoints_informer)

    if conf.watch_endpoint_slices then
        endpoints_informer.on_added = on_endpoint_slices_modified
        endpoints_informer.on_modified = on_endpoint_slices_modified
    else
        endpoints_informer.on_added = on_endpoint_modified
        endpoints_informer.on_modified = on_endpoint_modified
    end
    endpoints_informer.on_deleted = on_endpoint_deleted
    endpoints_informer.pre_list = pre_list
    endpoints_informer.post_list = post_list

    ctx = setmetatable({
        endpoint_dict = endpoint_dict,
        apiserver = apiserver,
        default_weight = default_weight
    }, { __index = endpoints_informer })

    start_fetch(ctx)
end


local function single_mode_nodes(service_name)
    local pattern = "^(.*):(.*)$"  -- namespace/name:port_name
    local match = ngx.re.match(service_name, pattern, "jo")
    if not match then
        core.log.error("get unexpected upstream service_name: ", service_name)
        return nil
    end

    local endpoint_dict = ctx
    local endpoint_key = match[1]
    local endpoint_port = match[2]
    local endpoint_version = endpoint_dict:get_stale(endpoint_key .. "#version")
    if not endpoint_version then
        core.log.info("get empty endpoint version from discovery DICT ", endpoint_key)
        return nil
    end

    return endpoint_lrucache(service_name, endpoint_version,
                             create_endpoint_lrucache, endpoint_dict,
                             endpoint_key, endpoint_port)
end


local function multiple_mode_worker_init(confs)
    for _, conf in ipairs(confs) do
        local id = conf.id
        if ctx[id] then
            error("duplicate id value")
        end

        local endpoint_dict = get_endpoint_dict(id)
        if not endpoint_dict then
            error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) ..
                  "please check your APISIX version")
        end

        ctx[id] = endpoint_dict
    end
end


local function multiple_mode_init(confs)
    ctx = core.table.new(#confs, 0)

    if process.type() ~= "privileged agent" then
        multiple_mode_worker_init(confs)
        return
    end

    for _, conf in ipairs(confs) do
        local id = conf.id

        if ctx[id] then
            error("duplicate id value")
        end

        local endpoint_dict = get_endpoint_dict(id)
        if not endpoint_dict then
            error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) ..
                  "please check your APISIX version")
        end

        local apiserver, err = get_apiserver(conf)
        if err then
            error(err)
            return
        end

        local default_weight = conf.default_weight

        local endpoints_informer, err
        if conf.watch_endpoint_slices then
            endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1",
                                                           "EndpointSlice", "endpointslices", "")
        else
            endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "")
        end
        if err then
            error(err)
            return
        end

        setup_namespace_selector(conf, endpoints_informer)
        setup_label_selector(conf, endpoints_informer)

        if conf.watch_endpoint_slices then
            endpoints_informer.on_added = on_endpoint_slices_modified
            endpoints_informer.on_modified = on_endpoint_slices_modified
        else
            endpoints_informer.on_added = on_endpoint_modified
            endpoints_informer.on_modified = on_endpoint_modified
        end
        endpoints_informer.on_deleted = on_endpoint_deleted
        endpoints_informer.pre_list = pre_list
        endpoints_informer.post_list = post_list

        ctx[id] = setmetatable({
            endpoint_dict = endpoint_dict,
            apiserver = apiserver,
            default_weight = default_weight
        }, { __index = endpoints_informer })
    end

    for _, item in pairs(ctx) do
        start_fetch(item)
    end
end


local function multiple_mode_nodes(service_name)
    local pattern = "^(.*)/(.*/.*):(.*)$"  -- id/namespace/name:port_name
    local match = ngx.re.match(service_name, pattern, "jo")
    if not match then
        core.log.error("get unexpected upstream service_name: ", service_name)
        return nil
    end

    local id = match[1]
    local endpoint_dict = ctx[id]
    if not endpoint_dict then
        core.log.error("id not exist")
        return nil
    end

    local endpoint_key = match[2]
    local endpoint_port = match[3]
    local endpoint_version = endpoint_dict:get_stale(endpoint_key .. "#version")
    if not endpoint_version then
        core.log.info("get empty endpoint version from discovery DICT ", endpoint_key)
        return nil
    end

    return endpoint_lrucache(service_name, endpoint_version,
                             create_endpoint_lrucache, endpoint_dict,
                             endpoint_key, endpoint_port)
end


function _M.init_worker()
    local discovery_conf = local_conf.discovery.kubernetes
    core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf))
    if #discovery_conf == 0 then
        _M.nodes = single_mode_nodes
        single_mode_init(discovery_conf)
    else
        _M.nodes = multiple_mode_nodes
        multiple_mode_init(discovery_conf)
    end
end


local function dump_endpoints_from_dict(endpoint_dict)
    local keys, err = endpoint_dict:get_keys(0)
    if err then
        core.log.error("get keys from discovery dict failed: ", err)
        return
    end

    if not keys or #keys == 0 then
        return
    end

    local endpoints = {}
    for i = 1, #keys do
        local key = keys[i]
        -- skip keys with the suffix #version
        if key:sub(-#"#version") ~= "#version" then
            local value = endpoint_dict:get(key)
            core.table.insert(endpoints, {
                name = key,
                value = value
            })
        end
    end

    return endpoints
end


function _M.dump_data()
    local discovery_conf = local_conf.discovery.kubernetes
    local eps = {}

    if #discovery_conf == 0 then
        -- single mode: discovery_conf is a single configuration object
        local endpoint_dict = get_endpoint_dict()
        local endpoints = dump_endpoints_from_dict(endpoint_dict)
        if endpoints then
            core.table.insert(eps, {
                endpoints = endpoints
            })
        end
    else
        -- multiple mode: discovery_conf is an array of configuration objects
        for _, conf in ipairs(discovery_conf) do
            local endpoint_dict = get_endpoint_dict(conf.id)
            local endpoints = dump_endpoints_from_dict(endpoint_dict)
            if endpoints then
                core.table.insert(eps, {
                    id = conf.id,
                    endpoints = endpoints
                })
            end
        end
    end

    return {config = discovery_conf, endpoints = eps}
end


return _M
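
-- Upstream naming sketch (derived from the two ngx.re.match patterns above):
-- single mode resolves a service_name of the form "namespace/name:port_name",
-- e.g. "default/kubernetes:https"; multiple mode prefixes the cluster id, e.g.
-- "release/default/kubernetes:https" for the cluster configured with
-- id = "release". The example names are assumptions for illustration.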
@@ -0,0 +1,217 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local host_patterns = {
    { pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
    { pattern = [[^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$]] },
}

local port_patterns = {
    { pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
    { pattern = [[^(([1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5]))$]] },
}

local schema_schema = {
    type = "string",
    enum = { "http", "https" },
    default = "https",
}

local token_patterns = {
    { pattern = [[\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] },
    { pattern = [[^[A-Za-z0-9+\/._=-]{0,4096}$]] },
}

local token_schema = {
    type = "string",
    oneOf = token_patterns,
}

local token_file_schema = {
    type = "string",
    pattern = [[^[^\:*?"<>|]*$]],
    minLength = 1,
    maxLength = 500,
}

local namespace_pattern = [[^[a-z0-9]([-a-z0-9_.]*[a-z0-9])?$]]

local namespace_regex_pattern = [[^[\x21-\x7e]*$]]

local namespace_selector_schema = {
    type = "object",
    properties = {
        equal = {
            type = "string",
            pattern = namespace_pattern,
        },
        not_equal = {
            type = "string",
            pattern = namespace_pattern,
        },
        match = {
            type = "array",
            items = {
                type = "string",
                pattern = namespace_regex_pattern
            },
            minItems = 1
        },
        not_match = {
            type = "array",
            items = {
                type = "string",
                pattern = namespace_regex_pattern
            },
            minItems = 1
        },
    },
    oneOf = {
        { required = {} },
        { required = { "equal" } },
        { required = { "not_equal" } },
        { required = { "match" } },
        { required = { "not_match" } }
    },
}

local label_selector_schema = {
    type = "string",
}

local default_weight_schema = {
    type = "integer",
    default = 50,
    minimum = 0,
}

local shared_size_schema = {
    type = "string",
    pattern = [[^[1-9][0-9]*m$]],
    default = "1m",
}

local watch_endpoint_slices_schema = {
    type = "boolean",
    default = false,
}

return {
    anyOf = {
        {
            type = "object",
            properties = {
                service = {
                    type = "object",
                    properties = {
                        schema = schema_schema,
                        host = {
                            type = "string",
                            oneOf = host_patterns,
                            default = "${KUBERNETES_SERVICE_HOST}",
                        },
                        port = {
                            type = "string",
                            oneOf = port_patterns,
                            default = "${KUBERNETES_SERVICE_PORT}",
                        },
                    },
                    default = {
                        schema = "https",
                        host = "${KUBERNETES_SERVICE_HOST}",
                        port = "${KUBERNETES_SERVICE_PORT}",
                    }
                },
                client = {
                    type = "object",
                    properties = {
                        token = token_schema,
                        token_file = token_file_schema,
                    },
                    default = {
                        token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
                    },
                    ["if"] = {
                        ["not"] = {
                            anyOf = {
                                { required = { "token" } },
                                { required = { "token_file" } },
                            }
                        }
                    },
                    ["then"] = {
                        properties = {
                            token_file = {
                                default = "/var/run/secrets/kubernetes.io/serviceaccount/token"
                            }
                        }
                    }
                },
                namespace_selector = namespace_selector_schema,
                label_selector = label_selector_schema,
                default_weight = default_weight_schema,
                shared_size = shared_size_schema,
                watch_endpoint_slices = watch_endpoint_slices_schema,
            },
        },
        {
            type = "array",
            minItems = 1,
            items = {
                type = "object",
                properties = {
                    id = {
                        type = "string",
                        pattern = [[^[a-z0-9]{1,8}$]]
                    },
                    service = {
                        type = "object",
                        properties = {
                            schema = schema_schema,
                            host = {
                                type = "string",
                                oneOf = host_patterns,
                            },
                            port = {
                                type = "string",
                                oneOf = port_patterns,
                            },
                        },
                        required = { "host", "port" }
                    },
                    client = {
                        type = "object",
                        properties = {
                            token = token_schema,
                            token_file = token_file_schema,
                        },
                        oneOf = {
                            { required = { "token" } },
                            { required = { "token_file" } },
                        },
                    },
                    namespace_selector = namespace_selector_schema,
                    label_selector = label_selector_schema,
                    default_weight = default_weight_schema,
                    shared_size = shared_size_schema,
                    watch_endpoint_slices = watch_endpoint_slices_schema,
                },
                required = { "id", "service", "client" }
            },
        }
    }
}
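
-- Note on the anyOf above: the first branch (a plain object) is single-cluster
-- mode and the second (a non-empty array) is multiple-cluster mode. init.lua
-- distinguishes them with `#discovery_conf == 0`, which holds for the object
-- form because Lua's length operator sees no array part in a map-like table.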
@@ -0,0 +1,392 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local require = require
local local_conf = require('apisix.core.config_local').local_conf()
local http = require('resty.http')
local core = require('apisix.core')
local ipairs = ipairs
local pairs = pairs
local type = type
local math = math
local math_random = math.random
local ngx = ngx
local ngx_re = require('ngx.re')
local ngx_timer_at = ngx.timer.at
local ngx_timer_every = ngx.timer.every
local string = string
local string_sub = string.sub
local str_byte = string.byte
local str_find = core.string.find
local log = core.log

local default_weight
local nacos_dict = ngx.shared.nacos -- key: namespace_id.group_name.service_name
if not nacos_dict then
    error("lua_shared_dict \"nacos\" not configured")
end

local auth_path = 'auth/login'
local instance_list_path = 'ns/instance/list?healthyOnly=true&serviceName='
local default_namespace_id = "public"
local default_group_name = "DEFAULT_GROUP"
local access_key
local secret_key


local _M = {}

local function get_key(namespace_id, group_name, service_name)
    return namespace_id .. '.' .. group_name .. '.' .. service_name
end

local function request(request_uri, path, body, method, basic_auth)
    local url = request_uri .. path
    log.info('request url:', url)
    local headers = {}
    headers['Accept'] = 'application/json'

    if basic_auth then
        headers['Authorization'] = basic_auth
    end

    if body and 'table' == type(body) then
        local err
        body, err = core.json.encode(body)
        if not body then
            return nil, 'invalid body: ' .. err
        end
        headers['Content-Type'] = 'application/json'
    end

    local httpc = http.new()
    local timeout = local_conf.discovery.nacos.timeout
    local connect_timeout = timeout.connect
    local send_timeout = timeout.send
    local read_timeout = timeout.read
    log.info('connect_timeout:', connect_timeout, ', send_timeout:', send_timeout,
             ', read_timeout:', read_timeout)
    httpc:set_timeouts(connect_timeout, send_timeout, read_timeout)
    local res, err = httpc:request_uri(url, {
        method = method,
        headers = headers,
        body = body,
        ssl_verify = true,
    })
    if not res then
        return nil, err
    end

    if not res.body or res.status ~= 200 then
        return nil, 'status = ' .. res.status
    end

    local json_str = res.body
    local data, err = core.json.decode(json_str)
    if not data then
        return nil, err
    end
    return data
end


local function get_url(request_uri, path)
    return request(request_uri, path, nil, 'GET', nil)
end


local function post_url(request_uri, path, body)
    return request(request_uri, path, body, 'POST', nil)
end


local function get_token_param(base_uri, username, password)
    if not username or not password then
        return ''
    end

    local args = { username = username, password = password}
    local data, err = post_url(base_uri, auth_path .. '?' .. ngx.encode_args(args), nil)
    if err then
        log.error('nacos login fail:', username, ' ', password, ' desc:', err)
        return nil, err
    end
    return '&accessToken=' .. data.accessToken
end


local function get_namespace_param(namespace_id)
    local param = ''
    if namespace_id then
        local args = {namespaceId = namespace_id}
        param = '&' .. ngx.encode_args(args)
    end
    return param
end


local function get_group_name_param(group_name)
    local param = ''
    if group_name then
        local args = {groupName = group_name}
        param = '&' .. ngx.encode_args(args)
    end
    return param
end


local function get_signed_param(group_name, service_name)
    local param = ''
    if access_key ~= '' and secret_key ~= '' then
        local str_to_sign = ngx.now() * 1000 .. '@@' .. group_name .. '@@' .. service_name
        local args = {
            ak = access_key,
            data = str_to_sign,
            signature = ngx.encode_base64(ngx.hmac_sha1(secret_key, str_to_sign))
        }
        param = '&' .. ngx.encode_args(args)
    end
    return param
end


local function get_base_uri()
    local host = local_conf.discovery.nacos.host
    -- TODO Add health check to get healthy nodes.
    local url = host[math_random(#host)]
    local auth_idx = core.string.rfind_char(url, '@')
    local username, password
    if auth_idx then
        local protocol_idx = str_find(url, '://')
        local protocol = string_sub(url, 1, protocol_idx + 2)
        local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1)
        local arr = ngx_re.split(user_and_password, ':')
        if #arr == 2 then
            username = arr[1]
            password = arr[2]
        end
        local other = string_sub(url, auth_idx + 1)
        url = protocol .. other
    end

    if local_conf.discovery.nacos.prefix then
        url = url .. local_conf.discovery.nacos.prefix
    end

    if str_byte(url, #url) ~= str_byte('/') then
        url = url .. '/'
    end

    return url, username, password
end
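
-- Worked example (assumed values): host = {"http://nacos:nacos@127.0.0.1:8848"}
-- makes get_base_uri() strip the credentials and return
-- username/password = "nacos"/"nacos"; with the schema's default prefix the
-- final base becomes "http://127.0.0.1:8848/nacos/v1/".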

local function de_duplication(services, namespace_id, group_name, service_name, scheme)
    for _, service in ipairs(services) do
        if service.namespace_id == namespace_id and service.group_name == group_name
                and service.service_name == service_name and service.scheme == scheme then
            return true
        end
    end
    return false
end


local function iter_and_add_service(services, values)
    if not values then
        return
    end

    for _, value in core.config_util.iterate_values(values) do
        local conf = value.value
        if not conf then
            goto CONTINUE
        end

        local up
        if conf.upstream then
            up = conf.upstream
        else
            up = conf
        end

        local namespace_id = (up.discovery_args and up.discovery_args.namespace_id)
                             or default_namespace_id

        local group_name = (up.discovery_args and up.discovery_args.group_name)
                           or default_group_name

        local dup = de_duplication(services, namespace_id, group_name,
                                   up.service_name, up.scheme)
        if dup then
            goto CONTINUE
        end

        if up.discovery_type == 'nacos' then
            core.table.insert(services, {
                service_name = up.service_name,
                namespace_id = namespace_id,
                group_name = group_name,
                scheme = up.scheme,
            })
        end
        ::CONTINUE::
    end
end


local function get_nacos_services()
    local services = {}

    -- here we use lazy load to work around a circular dependency
    local get_upstreams = require('apisix.upstream').upstreams
    local get_routes = require('apisix.router').http_routes
    local get_stream_routes = require('apisix.router').stream_routes
    local get_services = require('apisix.http.service').services
    local values = get_upstreams()
    iter_and_add_service(services, values)
    values = get_routes()
    iter_and_add_service(services, values)
    values = get_services()
    iter_and_add_service(services, values)
    values = get_stream_routes()
    iter_and_add_service(services, values)
    return services
end


local function is_grpc(scheme)
    if scheme == 'grpc' or scheme == 'grpcs' then
        return true
    end

    return false
end


local curr_service_in_use = {}
local function fetch_full_registry(premature)
    if premature then
        return
    end

    local base_uri, username, password = get_base_uri()
    local token_param, err = get_token_param(base_uri, username, password)
    if err then
        log.error('get_token_param error:', err)
        return
    end

    local infos = get_nacos_services()
    if #infos == 0 then
        return
    end
    local service_names = {}
    for _, service_info in ipairs(infos) do
        local data, err
        local namespace_id = service_info.namespace_id
        local group_name = service_info.group_name
        local scheme = service_info.scheme or ''
        local namespace_param = get_namespace_param(service_info.namespace_id)
        local group_name_param = get_group_name_param(service_info.group_name)
        local signature_param = get_signed_param(service_info.group_name,
                                                 service_info.service_name)
        local query_path = instance_list_path .. service_info.service_name
                           .. token_param .. namespace_param .. group_name_param
                           .. signature_param
        data, err = get_url(base_uri, query_path)
        if err then
            log.error('get_url:', query_path, ' err:', err)
            goto CONTINUE
        end

        local nodes = {}
        local key = get_key(namespace_id, group_name, service_info.service_name)
        service_names[key] = true
        for _, host in ipairs(data.hosts) do
            local node = {
                host = host.ip,
                port = host.port,
                weight = host.weight or default_weight,
            }
            -- docs: https://github.com/yidongnan/grpc-spring-boot-starter/pull/496
            if is_grpc(scheme) and host.metadata and host.metadata.gRPC_port then
                node.port = host.metadata.gRPC_port
            end

            core.table.insert(nodes, node)
        end
        if #nodes > 0 then
            local content = core.json.encode(nodes)
            nacos_dict:set(key, content)
        end
        ::CONTINUE::
    end
    -- remove services that are not in use anymore
    for key, _ in pairs(curr_service_in_use) do
        if not service_names[key] then
            nacos_dict:delete(key)
        end
    end
    curr_service_in_use = service_names
end


function _M.nodes(service_name, discovery_args)
    local namespace_id = discovery_args and
                         discovery_args.namespace_id or default_namespace_id
    local group_name = discovery_args
                       and discovery_args.group_name or default_group_name
    local key = get_key(namespace_id, group_name, service_name)
    local value = nacos_dict:get(key)
    if not value then
        core.log.error("nacos service not found: ", service_name)
        return nil
    end
    local nodes = core.json.decode(value)
    return nodes
end


function _M.init_worker()
    default_weight = local_conf.discovery.nacos.weight
    log.info('default_weight:', default_weight)
    local fetch_interval = local_conf.discovery.nacos.fetch_interval
    log.info('fetch_interval:', fetch_interval)
    access_key = local_conf.discovery.nacos.access_key
    secret_key = local_conf.discovery.nacos.secret_key
    ngx_timer_at(0, fetch_full_registry)
    ngx_timer_every(fetch_interval, fetch_full_registry)
end


function _M.dump_data()
    local keys = nacos_dict:get_keys(0)
    local applications = {}
    for _, key in ipairs(keys) do
        local value = nacos_dict:get(key)
        if value then
            local nodes = core.json.decode(value)
            if nodes then
                applications[key] = {
                    nodes = nodes,
                }
            end
        end
    end
    return {services = applications or {}}
end


return _M
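
-- Usage sketch (illustrative; the discovery_args field names come from
-- iter_and_add_service above, while the upstream id and service name are
-- assumptions): an upstream opts into nacos discovery like
--
--   upstreams:
--     - id: 1
--       discovery_type: nacos
--       service_name: APISIX-NACOS
--       discovery_args:
--         namespace_id: public
--         group_name: DEFAULT_GROUP
--       type: roundrobin
--
-- _M.nodes("APISIX-NACOS", {namespace_id = "public", group_name = "DEFAULT_GROUP"})
-- then returns the node list cached in the `nacos` shared dict.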
@@ -0,0 +1,59 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local host_pattern = [[^http(s)?:\/\/([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]]
local prefix_pattern = [[^[\/a-zA-Z0-9-_.]+$]]


return {
    type = 'object',
    properties = {
        host = {
            type = 'array',
            minItems = 1,
            items = {
                type = 'string',
                pattern = host_pattern,
                minLength = 2,
                maxLength = 100,
            },
        },
        fetch_interval = {type = 'integer', minimum = 1, default = 30},
        prefix = {
            type = 'string',
            pattern = prefix_pattern,
            maxLength = 100,
            default = '/nacos/v1/'
        },
        weight = {type = 'integer', minimum = 1, default = 100},
        timeout = {
            type = 'object',
            properties = {
                connect = {type = 'integer', minimum = 1, default = 2000},
                send = {type = 'integer', minimum = 1, default = 2000},
                read = {type = 'integer', minimum = 1, default = 5000},
            },
            default = {
                connect = 2000,
                send = 2000,
                read = 5000,
            }
        },
        access_key = {type = 'string', default = ''},
        secret_key = {type = 'string', default = ''},
    },
    required = {'host'}
}
@@ -0,0 +1,367 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local format = string.format
local ipairs = ipairs
local error = error
local tonumber = tonumber
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local mysql = require("resty.mysql")
local is_http = ngx.config.subsystem == "http"
local process = require("ngx.process")


local endpoint_dict


local full_query_sql = [[ select servant, group_concat(endpoint order by endpoint) as endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where setting_state = 'active' and present_state = 'active'
group by servant ]]

local incremental_query_sql = [[
select servant, (setting_state = 'active' and present_state = 'active') activated,
group_concat(endpoint order by endpoint) endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where (application, server_name) in
(
select application, server_name from t_server_conf
where registry_timestamp > now() - interval %d second
union
select application, server_name from t_adapter_conf
where registry_timestamp > now() - interval %d second
)
group by servant, activated order by activated desc ]]


local _M = {
    version = 0.1,
}


local default_weight

local last_fetch_full_time = 0
local last_db_error

local endpoint_lrucache = core.lrucache.new({
    ttl = 300,
    count = 1024
})

local activated_buffer = core.table.new(10, 0)
local nodes_buffer = core.table.new(0, 5)


--[[
an "endpoints" value is a comma-separated list of endpoints, formatted as follows:
tcp -h 172.16.1.1 -p 11 -t 6000 -e 0,tcp -e 0 -p 12 -h 172.16.1.1,tcp -p 13 -h 172.16.1.1
we extract the host and port values via endpoints_pattern
--]]
local endpoints_pattern = core.table.concat(
    { [[tcp(\s*-[te]\s*(\S+)){0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+))]],
      [[{0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+)){0,2}\s*(,|$)]] }
)
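
-- A minimal sketch of how the captures map to host and port, assuming the
-- hypothetical endpoint string below; the indices mirror extract_endpoint():
--   local it = ngx.re.gmatch("tcp -h 172.16.1.1 -p 10001 -t 3000,",
--                            endpoints_pattern, "jao")
--   local captures = it()
--   -- captures[3] == "h"          (flag letter of the first -h/-p option)
--   -- captures[4] == "172.16.1.1" (its value, here the host)
--   -- captures[8] == "10001"      (value of the second -h/-p option, the port)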


local function update_endpoint(servant, nodes)
    local endpoint_content = core.json.encode(nodes, true)
    local endpoint_version = ngx.crc32_long(endpoint_content)
    core.log.debug("set servant ", servant, endpoint_content)
    local _, err
    _, err = endpoint_dict:safe_set(servant .. "#version", endpoint_version)
    if err then
        core.log.error("set endpoint version into nginx shared dict failed, ", err)
        return
    end
    _, err = endpoint_dict:safe_set(servant, endpoint_content)
    if err then
        core.log.error("set endpoint into nginx shared dict failed, ", err)
        endpoint_dict:delete(servant .. "#version")
    end
end
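
-- Each servant is stored under two shared-dict keys: "<servant>" holds the
-- JSON-encoded node list and "<servant>#version" a CRC32 of that JSON.
-- get_endpoint() feeds the version into the lrucache below, so a worker only
-- re-decodes the JSON when the encoded content has actually changed.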


local function delete_endpoint(servant)
    core.log.info("delete servant ", servant)
    endpoint_dict:delete(servant .. "#version")
    endpoint_dict:delete(servant)
end


local function add_endpoint_to_lrucache(servant)
    local endpoint_content, err = endpoint_dict:get_stale(servant)
    if not endpoint_content then
        core.log.error("get empty endpoint content, servant: ", servant, ", err: ", err)
        return nil
    end

    local endpoint, err = core.json.decode(endpoint_content)
    if not endpoint then
        core.log.error("decode json failed, content: ", endpoint_content, ", err: ", err)
        return nil
    end

    return endpoint
end


local function get_endpoint(servant)

    --[[
    fetch_full will:
    1: call endpoint_dict:flush_all()
    2: store servant:nodes pairs into endpoint_dict
    3: call endpoint_dict:flush_expired()

    get_endpoint may be called during step 2 of fetch_full,
    so we must use endpoint_dict:get_stale() instead of endpoint_dict:get()
    --]]

    local endpoint_version, err = endpoint_dict:get_stale(servant .. "#version")
    if not endpoint_version then
        if err then
            core.log.error("get empty endpoint version, servant: ", servant, ", err: ", err)
        end
        return nil
    end
    return endpoint_lrucache(servant, endpoint_version, add_endpoint_to_lrucache, servant)
end


local function extract_endpoint(query_result)
    for _, p in ipairs(query_result) do
        -- the repeat ... until true block acts as a per-row "continue"
        repeat
            local servant = p.servant

            if servant == ngx.null then
                break
            end

            if p.activated == 1 then
                activated_buffer[servant] = ngx.null
            elseif p.activated == 0 then
                if activated_buffer[servant] == nil then
                    delete_endpoint(servant)
                end
                break
            end

            core.table.clear(nodes_buffer)
            local iterator = ngx.re.gmatch(p.endpoints, endpoints_pattern, "jao")
            while true do
                local captures, err = iterator()
                if err then
                    core.log.error("gmatch failed, error: ", err, " , endpoints: ", p.endpoints)
                    break
                end

                if not captures then
                    break
                end

                local host, port
                if captures[3] == "h" or captures[3] == "H" then
                    host = captures[4]
                    port = tonumber(captures[8])
                else
                    host = captures[8]
                    port = tonumber(captures[4])
                end

                core.table.insert(nodes_buffer, {
                    host = host,
                    port = port,
                    weight = default_weight,
                })
            end
            update_endpoint(servant, nodes_buffer)
        until true
    end
end
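
-- A minimal sketch of extract_endpoint() on one hypothetical full-query row:
--   extract_endpoint({
--       {
--           servant = "A.AServer.FirstObj",
--           endpoints = "tcp -h 172.16.1.1 -p 10001 -t 3000",
--       },
--   })
--   -- endpoint_dict["A.AServer.FirstObj"] now holds the JSON-encoded node
--   -- list {{host = "172.16.1.1", port = 10001, weight = default_weight}},
--   -- and endpoint_dict["A.AServer.FirstObj#version"] its CRC32 version.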


local function fetch_full(db_cli)
    local res, err, errcode, sqlstate = db_cli:query(full_query_sql)
    --[[
    res is formatted as follows:
    {
        {
            servant = "A.AServer.FirstObj",
            endpoints = "tcp -h 172.16.1.1 -p 10001 -e 0 -t 3000,tcp -p 10002 -h 172.16.1.2 -t 3000"
        },
        {
            servant = "A.AServer.SecondObj",
            endpoints = "tcp -t 3000 -p 10002 -h 172.16.1.2"
        },
    }

    if the current endpoint_dict is:
    key1:nodes1, key2:nodes2, key3:nodes3

    and fetch_full gets the following results:
    key1:nodes1, key4:nodes4, key5:nodes5

    then we need to
    1: store key4:nodes4 and key5:nodes5
    2: delete key2:nodes2 and key3:nodes3

    to achieve this, we:
    1: execute endpoint_dict:flush_all() before storing the results
    2: execute endpoint_dict:flush_expired() after storing the results
    --]]
    if not res then
        core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
        return err
    end

    endpoint_dict:flush_all()
    extract_endpoint(res)

    while err == "again" do
        res, err, errcode, sqlstate = db_cli:read_result()
        if not res then
            if err then
                core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
            end
            return err
        end
        extract_endpoint(res)
    end
    endpoint_dict:flush_expired()

    return nil
end


local function fetch_incremental(db_cli)
    local res, err, errcode, sqlstate = db_cli:query(incremental_query_sql)
    --[[
    res is formatted as follows:
    {
        {
            activated = 1,
            servant = "A.AServer.FirstObj",
            endpoints = "tcp -h 172.16.1.1 -p 10001 -e 0 -t 3000,tcp -p 10002 -h 172.16.1.2 -t 3000"
        },
        {
            activated = 0,
            servant = "A.AServer.FirstObj",
            endpoints = "tcp -t 3000 -p 10001 -h 172.16.1.3"
        },
        {
            activated = 0,
            servant = "B.BServer.FirstObj",
            endpoints = "tcp -t 3000 -p 10002 -h 172.16.1.2"
        },
    }

    for each item:
    if activated == 1, store its endpoints
    if activated == 0 and another item with the same servant has activated == 1, ignore it
    if activated == 0 and no other item has the same servant, delete the servant
    --]]
    if not res then
        core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
        return err
    end

    core.table.clear(activated_buffer)
    extract_endpoint(res)

    while err == "again" do
        res, err, errcode, sqlstate = db_cli:read_result()
        if not res then
            if err then
                core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
            end
            return err
        end
        extract_endpoint(res)
    end

    return nil
end


local function fetch_endpoint(premature, conf)
    if premature then
        return
    end

    local db_cli, err = mysql:new()
    if not db_cli then
        core.log.error("failed to instantiate mysql: ", err)
        return
    end
    db_cli:set_timeout(3000)

    local ok, err, errcode, sqlstate = db_cli:connect(conf.db_conf)
    if not ok then
        core.log.error("failed to connect to mysql: ", err, ", ", errcode, ", ", sqlstate)
        return
    end

    local now = ngx.time()

    if last_db_error or last_fetch_full_time + conf.full_fetch_interval <= now then
        last_fetch_full_time = now
        last_db_error = fetch_full(db_cli)
    else
        last_db_error = fetch_incremental(db_cli)
    end

    -- only pool the connection when the queries succeeded
    if not last_db_error then
        db_cli:set_keepalive(120 * 1000, 1)
    end
end


function _M.nodes(servant)
    return get_endpoint(servant)
end


local function get_endpoint_dict()
    local shm = "tars"

    if not is_http then
        shm = shm .. "-stream"
    end

    return ngx.shared[shm]
end


function _M.init_worker()
    endpoint_dict = get_endpoint_dict()
    if not endpoint_dict then
        error("failed to get lua_shared_dict: tars, please check your APISIX version")
    end

    -- only the privileged agent polls the registry; other workers just read
    -- the shared dict via _M.nodes()
    if process.type() ~= "privileged agent" then
        return
    end

    local conf = local_conf.discovery.tars
    default_weight = conf.default_weight

    core.log.info("conf ", core.json.delay_encode(conf))
    local backtrack_time = conf.incremental_fetch_interval + 5
    incremental_query_sql = format(incremental_query_sql, backtrack_time, backtrack_time)

    ngx.timer.at(0, fetch_endpoint, conf)
    ngx.timer.every(conf.incremental_fetch_interval, fetch_endpoint, conf)
end


return _M
@@ -0,0 +1,45 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local host_pattern = [[^([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]]

return {
    type = 'object',
    properties = {
        db_conf = {
            type = 'object',
            properties = {
                host = { type = 'string', minLength = 1, maxLength = 500, pattern = host_pattern },
                port = { type = 'integer', minimum = 1, maximum = 65535, default = 3306 },
                database = { type = 'string', minLength = 1, maxLength = 64 },
                user = { type = 'string', minLength = 1, maxLength = 64 },
                password = { type = 'string', minLength = 1, maxLength = 64 },
            },
            required = { 'host', 'database', 'user', 'password' }
        },
        full_fetch_interval = {
            type = 'integer', minimum = 90, maximum = 3600, default = 300,
        },
        incremental_fetch_interval = {
            type = 'integer', minimum = 5, maximum = 60, default = 15,
        },
        default_weight = {
            type = 'integer', minimum = 0, maximum = 100, default = 100,
        },
    },
    required = { 'db_conf' }
}
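
-- A minimal config.yaml sketch accepted by this schema (connection values
-- below are hypothetical):
--
--   discovery:
--     tars:
--       db_conf:
--         host: "127.0.0.1"
--         port: 3306
--         database: "db_tars"
--         user: "root"
--         password: "tars2022"
--       full_fetch_interval: 300        # seconds; schema default
--       incremental_fetch_interval: 15  # seconds; schema default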