feat(apisix): add Cloudron package
- Implements Apache APISIX packaging for the Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI

Co-Authored-By: Gemini <noreply@google.com>
This commit is contained in:
154
CloudronPackages/APISIX/apisix-source/apisix/balancer/chash.lua
Normal file
154
CloudronPackages/APISIX/apisix-source/apisix/balancer/chash.lua
Normal file
@@ -0,0 +1,154 @@
|
||||
--
|
||||
-- Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
-- contributor license agreements. See the NOTICE file distributed with
|
||||
-- this work for additional information regarding copyright ownership.
|
||||
-- The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
-- (the "License"); you may not use this file except in compliance with
|
||||
-- the License. You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
local core = require("apisix.core")
|
||||
local resty_chash = require("resty.chash")
|
||||
local str_char = string.char
|
||||
local str_gsub = string.gsub
|
||||
local pairs = pairs
|
||||
|
||||
|
||||
local CONSISTENT_POINTS = 160 -- points per server, taken from `resty.chash`
|
||||
|
||||
|
||||
local _M = {}
|
||||
|
||||
|
||||
-- Work out the value the consistent-hash ring should hash for this request,
-- driven by upstream.hash_on ("vars" by default) and upstream.key.
-- Falls back to the client address when no key value can be resolved.
local function fetch_chash_hash_key(ctx, upstream)
    local key = upstream.key
    local hash_on = upstream.hash_on or "vars"
    local chash_key

    if hash_on == "vars" then
        chash_key = ctx.var[key]
    elseif hash_on == "consumer" then
        chash_key = ctx.consumer_name
    elseif hash_on == "header" then
        chash_key = ctx.var["http_" .. key]
    elseif hash_on == "cookie" then
        chash_key = ctx.var["cookie_" .. key]
    elseif hash_on == "vars_combinations" then
        local resolved, err, n_resolved = core.utils.resolve_var(key, ctx.var)
        if err then
            core.log.error("could not resolve vars in ", key, " error: ", err)
        end

        -- a template where no variable resolved counts as "no key"
        if n_resolved ~= 0 then
            chash_key = resolved
        end
    end

    if not chash_key then
        chash_key = ctx.var["remote_addr"]
        core.log.warn("chash_key fetch is nil, use default chash_key ",
                      "remote_addr: ", chash_key)
    end

    core.log.info("upstream key: ", key)
    core.log.info("hash_on: ", hash_on)
    core.log.info("chash_key: ", core.json.delay_encode(chash_key))

    return chash_key
end
|
||||
|
||||
|
||||
-- Create a consistent-hash picker over `up_nodes` ("host:port" -> weight).
-- Weights are divided by their GCD before being handed to resty.chash so
-- their relative proportions are kept with the smallest point counts.
function _M.new(up_nodes, upstream)
    local str_null = str_char(0)

    local nodes_count = 0
    local safe_limit = 0
    local gcd = 0
    local servers, nodes = {}, {}

    -- first pass: GCD of all weights
    for serv, weight in pairs(up_nodes) do
        if gcd == 0 then
            gcd = weight
        else
            gcd = core.math.gcd(gcd, weight)
        end
    end

    if gcd == 0 then
        -- all nodes' weight are 0
        gcd = 1
    end

    -- second pass: build resty.chash's input tables
    for serv, weight in pairs(up_nodes) do
        -- node id: the address with ":" replaced by a NUL byte
        local id = str_gsub(serv, ":", str_null)

        nodes_count = nodes_count + 1
        weight = weight / gcd
        safe_limit = safe_limit + weight
        servers[id] = serv      -- id -> original "host:port"
        nodes[id] = weight      -- id -> scaled weight
    end
    -- upper bound on ring-walk iterations (total points on the hash ring)
    safe_limit = safe_limit * CONSISTENT_POINTS

    local picker = resty_chash:new(nodes)
    return {
        upstream = upstream,

        -- Pick a server. On a retry (balancer_tried_servers is set), walk the
        -- ring from the last position skipping already-tried servers;
        -- otherwise hash the request's chash key.
        get = function (ctx)
            local id
            if ctx.balancer_tried_servers then
                if ctx.balancer_tried_servers_count == nodes_count then
                    return nil, "all upstream servers tried"
                end

                -- the 'safe_limit' is a best effort limit to prevent infinite loop caused by bug
                for i = 1, safe_limit do
                    id, ctx.chash_last_server_index = picker:next(ctx.chash_last_server_index)
                    if not ctx.balancer_tried_servers[servers[id]] then
                        break
                    end
                end
            else
                local chash_key = fetch_chash_hash_key(ctx, upstream)
                id, ctx.chash_last_server_index = picker:find(chash_key)
            end
            -- core.log.warn("chash id: ", id, " val: ", servers[id])
            return servers[id]
        end,

        -- Record the tried server before a retry; release the bookkeeping
        -- table back to the pool once the request is done.
        after_balance = function (ctx, before_retry)
            if not before_retry then
                if ctx.balancer_tried_servers then
                    core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                    ctx.balancer_tried_servers = nil
                end

                return nil
            end

            if not ctx.balancer_tried_servers then
                ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
            end

            ctx.balancer_tried_servers[ctx.balancer_server] = true
            ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
        end,

        -- Reset retry bookkeeping before falling through to the next
        -- priority group of nodes.
        before_retry_next_priority = function (ctx)
            if ctx.balancer_tried_servers then
                core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                ctx.balancer_tried_servers = nil
            end

            ctx.balancer_tried_servers_count = 0
        end,
    }
end
|
||||
|
||||
|
||||
return _M
|
243
CloudronPackages/APISIX/apisix-source/apisix/balancer/ewma.lua
Normal file
243
CloudronPackages/APISIX/apisix-source/apisix/balancer/ewma.lua
Normal file
@@ -0,0 +1,243 @@
|
||||
-- Original Authors: Shiv Nagarajan & Scott Francis
|
||||
-- Accessed: March 12, 2018
|
||||
-- Inspiration drawn from:
|
||||
-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421
|
||||
-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
|
||||
local core = require("apisix.core")
|
||||
local resty_lock = require("resty.lock")
|
||||
|
||||
local nkeys = core.table.nkeys
|
||||
local table_insert = core.table.insert
|
||||
local ngx = ngx
|
||||
local ngx_shared = ngx.shared
|
||||
local ngx_now = ngx.now
|
||||
local math = math
|
||||
local pairs = pairs
|
||||
local ipairs = ipairs
|
||||
local next = next
|
||||
local error = error
|
||||
|
||||
local DECAY_TIME = 10 -- this value is in seconds
|
||||
local LOCK_KEY = ":ewma_key"
|
||||
|
||||
local shm_ewma = ngx_shared["balancer-ewma"]
|
||||
local shm_last_touched_at = ngx_shared["balancer-ewma-last-touched-at"]
|
||||
|
||||
local lrucache_addr = core.lrucache.new({ttl = 300, count = 1024})
|
||||
local lrucache_trans_format = core.lrucache.new({ttl = 300, count = 256})
|
||||
|
||||
local ewma_lock, ewma_lock_err = resty_lock:new("balancer-ewma-locks", {timeout = 0, exptime = 0.1})
|
||||
|
||||
local _M = {name = "ewma"}
|
||||
|
||||
-- Take the per-upstream EWMA lock. Returns the lock error, if any;
-- "timeout" is passed through silently, other failures are logged.
local function lock(upstream)
    local _, err = ewma_lock:lock(upstream .. LOCK_KEY)
    if err then
        if err ~= "timeout" then
            core.log.error("EWMA Balancer failed to lock: ", err)
        end
    end

    return err
end
|
||||
|
||||
-- Release the EWMA lock, logging (but still returning) any failure.
local function unlock()
    local ok, err = ewma_lock:unlock()
    if ok then
        return err
    end

    core.log.error("EWMA Balancer failed to unlock: ", err)
    return err
end
|
||||
|
||||
-- Exponentially decay a stored EWMA value toward the newly observed rtt,
-- weighting by how long ago the value was last touched (clamped to >= 0).
local function decay_ewma(ewma, last_touched_at, rtt, now)
    local elapsed = math.max(now - last_touched_at, 0)
    local decay = math.exp(-elapsed / DECAY_TIME)

    return ewma * decay + rtt * (1.0 - decay)
end
|
||||
|
||||
-- Persist the ewma value and its touch timestamp for `upstream` into the
-- shared dicts; failures are logged but not propagated.
local function store_stats(upstream, ewma, now)
    local ok, err, forcible = shm_last_touched_at:set(upstream, now)
    if not ok then
        core.log.error("shm_last_touched_at:set failed: ", err)
    end
    if forcible then
        -- the dict evicted valid entries to make room
        core.log.warn("shm_last_touched_at:set valid items forcibly overwritten")
    end

    local ok2, err2, forcible2 = shm_ewma:set(upstream, ewma)
    if not ok2 then
        core.log.error("shm_ewma:set failed: ", err2)
    end
    if forcible2 then
        core.log.warn("shm_ewma:set valid items forcibly overwritten")
    end
end
|
||||
|
||||
-- Read the decayed EWMA for `upstream`. When `update` is true, also fold
-- the new `rtt` sample in and persist it under the per-upstream lock.
-- Returns ewma, err; on lock failure returns 0 plus the lock error.
local function get_or_update_ewma(upstream, rtt, update)
    if update then
        local lock_err = lock(upstream)
        if lock_err ~= nil then
            return 0, lock_err
        end
    end

    local now = ngx_now()
    local stored = shm_ewma:get(upstream) or 0
    local touched = shm_last_touched_at:get(upstream) or 0
    local ewma = decay_ewma(stored, touched, rtt, now)

    if update then
        store_stats(upstream, ewma, now)
        unlock()
    end

    return ewma, nil
end
|
||||
|
||||
-- Shared-dict key for one endpoint: "host:port".
local function get_upstream_name(upstream)
    return ("%s:%s"):format(upstream.host, upstream.port)
end
|
||||
|
||||
-- Current (read-only) EWMA score of an endpoint.
-- Original implementation used names; endpoints don't have names, so the
-- "IP:Port" string is used as the key instead.
local function score(upstream)
    return get_or_update_ewma(get_upstream_name(upstream), 0, false)
end
|
||||
|
||||
-- Split an "host:port" address into a peer table; the second return value
-- is the parse error, if any.
local function parse_addr(addr)
    local host, port, err = core.utils.parse_addr(addr)
    local peer = {host = host, port = port}
    return peer, err
end
|
||||
|
||||
-- Convert the weight map
--   {"1.2.3.4:80":100,"5.6.7.8:8080":100}
-- into an array of peers
--   [{"host":"1.2.3.4","port":"80"},{"host":"5.6.7.8","port":"8080"}]
-- Returns nil when no address could be parsed.
local function _trans_format(up_nodes)
    local peers = {}

    for addr in pairs(up_nodes) do
        local peer, err = lrucache_addr(addr, nil, parse_addr, addr)
        if err then
            core.log.error('parse_addr error: ', addr, err)
        else
            core.table.insert(peers, peer)
        end
    end

    if next(peers) == nil then
        return nil
    end

    return peers
end
|
||||
|
||||
-- Pick an endpoint for this request using the "two random choices" scheme:
-- sample two distinct peers and keep the one with the lower EWMA score.
-- Peers already tried in this request (retries) are filtered out first.
local function _ewma_find(ctx, up_nodes)
    local peers

    if not up_nodes or nkeys(up_nodes) == 0 then
        return nil, 'up_nodes empty'
    end

    if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nkeys(up_nodes) then
        return nil, "all upstream servers tried"
    end

    -- cached peer-array form of up_nodes, invalidated by upstream_version
    peers = lrucache_trans_format(up_nodes, ctx.upstream_version, _trans_format, up_nodes)
    if not peers then
        return nil, 'up_nodes trans error'
    end

    local filtered_peers
    if ctx.balancer_tried_servers then
        for _, peer in ipairs(peers) do
            if not ctx.balancer_tried_servers[get_upstream_name(peer)] then
                if not filtered_peers then
                    filtered_peers = {}
                end

                table_insert(filtered_peers, peer)
            end
        end
    else
        filtered_peers = peers
    end

    local endpoint = filtered_peers[1]

    if #filtered_peers > 1 then
        -- draw two distinct indices: b is drawn from n-1 slots and shifted
        -- past a so a ~= b always holds
        local a, b = math.random(1, #filtered_peers), math.random(1, #filtered_peers - 1)
        if b >= a then
            b = b + 1
        end

        local backendpoint
        endpoint, backendpoint = filtered_peers[a], filtered_peers[b]
        if score(endpoint) > score(backendpoint) then
            endpoint = backendpoint
        end
    end

    return get_upstream_name(endpoint)
end
|
||||
|
||||
-- Hook run after each balancing attempt. Before a retry it only records
-- the failed server; after the final attempt it releases the retry table
-- and feeds the measured rtt into the upstream's EWMA.
local function _ewma_after_balance(ctx, before_retry)
    if before_retry then
        if not ctx.balancer_tried_servers then
            ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
        end

        ctx.balancer_tried_servers[ctx.balancer_server] = true
        ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1

        return nil
    end

    if ctx.balancer_tried_servers then
        core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
        ctx.balancer_tried_servers = nil
    end

    -- rtt sample = connect time + response time (nginx vars; 0 when unset)
    local rtt = (ctx.var.upstream_connect_time or 0)
                + (ctx.var.upstream_response_time or 0)

    local upstream = ctx.var.upstream_addr
    if not upstream then
        return nil, "no upstream addr found"
    end

    return get_or_update_ewma(upstream, rtt, true)
end
|
||||
|
||||
-- Build the EWMA picker. Fails when the required shared dicts are missing;
-- raises when the resty.lock instance could not be created at load time.
function _M.new(up_nodes, upstream)
    if not (shm_ewma and shm_last_touched_at) then
        return nil, "dictionary not find"
    end

    if not ewma_lock then
        error(ewma_lock_err)
    end

    local picker = {
        upstream = upstream,
        after_balance = _ewma_after_balance,
    }

    picker.get = function(ctx)
        return _ewma_find(ctx, up_nodes)
    end

    -- drop retry bookkeeping before the next priority group is tried
    picker.before_retry_next_priority = function(ctx)
        if ctx.balancer_tried_servers then
            core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
            ctx.balancer_tried_servers = nil
        end

        ctx.balancer_tried_servers_count = 0
    end

    return picker
end
|
||||
|
||||
return _M
|
@@ -0,0 +1,113 @@
|
||||
--
|
||||
-- Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
-- contributor license agreements. See the NOTICE file distributed with
|
||||
-- this work for additional information regarding copyright ownership.
|
||||
-- The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
-- (the "License"); you may not use this file except in compliance with
|
||||
-- the License. You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
local core = require("apisix.core")
|
||||
local binaryHeap = require("binaryheap")
|
||||
local ipairs = ipairs
|
||||
local pairs = pairs
|
||||
|
||||
|
||||
local _M = {}
|
||||
|
||||
|
||||
-- Heap comparator: the node with the smaller score sorts first.
local function least_score(left, right)
    return left.score < right.score
end
|
||||
|
||||
|
||||
-- Create a least-connections picker. Each node's score starts at 1/weight
-- and is raised by effect_weight while a connection is in flight, so the
-- heap top is always the node with the fewest weighted active connections.
function _M.new(up_nodes, upstream)
    local servers_heap = binaryHeap.minUnique(least_score)
    for server, weight in pairs(up_nodes) do
        local score = 1 / weight
        -- Note: the argument order of insert is different from others
        servers_heap:insert({
            server = server,
            effect_weight = 1 / weight,
            score = score,
        }, server)
    end

    return {
        upstream = upstream,

        -- Pop already-tried servers off the heap until an untried one is on
        -- top (re-inserting them afterwards), then charge the chosen server
        -- one weighted connection.
        get = function (ctx)
            local server, info, err
            if ctx.balancer_tried_servers then
                local tried_server_list = {}
                while true do
                    server, info = servers_heap:peek()
                    -- we need to let the retry > #nodes so this branch can be hit and
                    -- the request will retry next priority of nodes
                    if server == nil then
                        err = "all upstream servers tried"
                        break
                    end

                    if not ctx.balancer_tried_servers[server] then
                        break
                    end

                    servers_heap:pop()
                    core.table.insert(tried_server_list, info)
                end

                -- put the temporarily removed nodes back on the heap
                for _, info in ipairs(tried_server_list) do
                    servers_heap:insert(info, info.server)
                end
            else
                server, info = servers_heap:peek()
            end

            if not server then
                return nil, err
            end

            info.score = info.score + info.effect_weight
            servers_heap:update(server, info)
            return server
        end,

        -- Refund the weighted connection charged in get(); on the final
        -- attempt also release the retry table, otherwise record the server
        -- as tried.
        after_balance = function (ctx, before_retry)
            local server = ctx.balancer_server
            local info = servers_heap:valueByPayload(server)
            info.score = info.score - info.effect_weight
            servers_heap:update(server, info)

            if not before_retry then
                if ctx.balancer_tried_servers then
                    core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                    ctx.balancer_tried_servers = nil
                end

                return nil
            end

            if not ctx.balancer_tried_servers then
                ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
            end

            ctx.balancer_tried_servers[server] = true
        end,

        -- Drop retry bookkeeping before the next priority group is tried.
        before_retry_next_priority = function (ctx)
            if ctx.balancer_tried_servers then
                core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                ctx.balancer_tried_servers = nil
            end
        end,
    }
end
|
||||
|
||||
|
||||
return _M
|
@@ -0,0 +1,81 @@
|
||||
--
|
||||
-- Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
-- contributor license agreements. See the NOTICE file distributed with
|
||||
-- this work for additional information regarding copyright ownership.
|
||||
-- The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
-- (the "License"); you may not use this file except in compliance with
|
||||
-- the License. You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
local core = require("apisix.core")
|
||||
local ipairs = ipairs
|
||||
|
||||
|
||||
local _M = {}
|
||||
|
||||
|
||||
-- Sort comparator: higher priorities come first.
local function max_priority(lhs, rhs)
    return lhs > rhs
end
|
||||
|
||||
|
||||
-- Compose one picker per priority group (highest priority first), created
-- via `picker_mod.new`. get() tries groups in order, falling back to the
-- next one when the current group has no usable server.
function _M.new(up_nodes, upstream, picker_mod)
    local priority_index = up_nodes._priority_index
    core.table.sort(priority_index, max_priority)

    local pickers = core.table.new(#priority_index, 0)
    for i, priority in ipairs(priority_index) do
        local picker, err = picker_mod.new(up_nodes[priority], upstream)
        if not picker then
            return nil, "failed to create picker with priority " .. priority .. ": " .. err
        end
        -- the reset hook is mandatory: it clears per-request retry state
        -- when we fall through to the next group
        if not picker.before_retry_next_priority then
            return nil, "picker should define 'before_retry_next_priority' to reset ctx"
        end

        pickers[i] = picker
    end

    return {
        upstream = upstream,

        -- Resume from the last group that served this request (if any) and
        -- walk down the priority list until some picker yields a server.
        get = function (ctx)
            for i = ctx.priority_balancer_picker_idx or 1, #pickers do
                local picker = pickers[i]
                local server, err = picker.get(ctx)
                if server then
                    ctx.priority_balancer_picker_idx = i
                    return server
                end

                core.log.notice("failed to get server from current priority ",
                                priority_index[i],
                                ", try next one, err: ", err)

                picker.before_retry_next_priority(ctx)
            end

            return nil, "all servers tried"
        end,

        -- Delegate to the picker that actually served the request, when it
        -- implements after_balance.
        after_balance = function (ctx, before_retry)
            local priority_balancer_picker = pickers[ctx.priority_balancer_picker_idx]
            if not priority_balancer_picker or
                not priority_balancer_picker.after_balance
            then
                return
            end

            priority_balancer_picker.after_balance(ctx, before_retry)
        end
    }
end
|
||||
|
||||
|
||||
return _M
|
@@ -0,0 +1,89 @@
|
||||
--
|
||||
-- Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
-- contributor license agreements. See the NOTICE file distributed with
|
||||
-- this work for additional information regarding copyright ownership.
|
||||
-- The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
-- (the "License"); you may not use this file except in compliance with
|
||||
-- the License. You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
|
||||
local roundrobin = require("resty.roundrobin")
|
||||
local core = require("apisix.core")
|
||||
local nkeys = core.table.nkeys
|
||||
local pairs = pairs
|
||||
|
||||
|
||||
local _M = {}
|
||||
|
||||
|
||||
-- Create a weighted round-robin picker backed by resty.roundrobin.
-- `up_nodes` maps "host:port" -> weight.
function _M.new(up_nodes, upstream)
    -- upper bound on find() calls when skipping tried servers; "+1" keeps
    -- the bound positive even for zero-weight nodes
    local safe_limit = 0
    for _, weight in pairs(up_nodes) do
        -- the weight can be zero
        safe_limit = safe_limit + weight + 1
    end

    local picker = roundrobin:new(up_nodes)
    local nodes_count = nkeys(up_nodes)
    return {
        upstream = upstream,

        -- Advance the round-robin ring, skipping servers already tried in
        -- this request; bails out once every node has been tried.
        get = function (ctx)
            if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nodes_count then
                return nil, "all upstream servers tried"
            end

            local server, err
            for i = 1, safe_limit do
                server, err = picker:find()
                if not server then
                    return nil, err
                end
                if ctx.balancer_tried_servers then
                    if not ctx.balancer_tried_servers[server] then
                        break
                    end
                else
                    break
                end
            end

            return server
        end,

        -- Record the tried server before a retry; release the bookkeeping
        -- table back to the pool once the request is done.
        after_balance = function (ctx, before_retry)
            if not before_retry then
                if ctx.balancer_tried_servers then
                    core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                    ctx.balancer_tried_servers = nil
                end

                return nil
            end

            if not ctx.balancer_tried_servers then
                ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2)
            end

            ctx.balancer_tried_servers[ctx.balancer_server] = true
            ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1
        end,

        -- Reset retry bookkeeping before the next priority group is tried.
        before_retry_next_priority = function (ctx)
            if ctx.balancer_tried_servers then
                core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers)
                ctx.balancer_tried_servers = nil
            end

            ctx.balancer_tried_servers_count = 0
        end,
    }
end
|
||||
|
||||
|
||||
return _M
|
Reference in New Issue
Block a user