feat(apisix): add Cloudron package
- Implements Apache APISIX packaging for the Cloudron platform.
- Includes Dockerfile, CloudronManifest.json, and start.sh.
- Configured to use Cloudron's etcd addon.

🤖 Generated with Gemini CLI

Co-Authored-By: Gemini <noreply@google.com>
CloudronPackages/APISIX/apisix-source/apisix/utils/auth.lua (new file, 24 lines)
@@ -0,0 +1,24 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local _M = {}


function _M.is_running_under_multi_auth(ctx)
    return ctx._plugin_name == "multi-auth"
end


return _M
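A minimal usage sketch for the helper above, as an authentication plugin might call it (the `reject` wrapper and the error message are illustrative, not part of this commit):

    local auth_utils = require("apisix.utils.auth")

    -- hypothetical helper inside an auth plugin's phase handler
    local function reject(ctx)
        if auth_utils.is_running_under_multi_auth(ctx) then
            -- under multi-auth, fail quietly so the next authenticator can run
            return 401
        end
        return 401, { message = "Missing credential in request" }
    end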
@@ -0,0 +1,158 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local batch_processor = require("apisix.utils.batch-processor")
local timer_at = ngx.timer.at
local pairs = pairs
local setmetatable = setmetatable


local _M = {}
local mt = { __index = _M }


function _M.new(name)
    return setmetatable({
        stale_timer_running = false,
        buffers = {},
        total_pushed_entries = 0,
        name = name,
    }, mt)
end


function _M:wrap_schema(schema)
    local bp_schema = core.table.deepcopy(batch_processor.schema)
    local properties = schema.properties
    for k, v in pairs(bp_schema.properties) do
        if not properties[k] then
            properties[k] = v
        end
        -- don't touch if the plugin overrides the property
    end

    properties.name.default = self.name
    return schema
end


-- remove stale objects from the memory after timer expires
local function remove_stale_objects(premature, self)
    if premature then
        return
    end

    for key, batch in pairs(self.buffers) do
        if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
            core.log.info("removing batch processor stale object, conf: ",
                          core.json.delay_encode(key))
            self.buffers[key] = nil
        end
    end

    self.stale_timer_running = false
end


local check_stale
do
    local interval = 1800

    function check_stale(self)
        if not self.stale_timer_running then
            -- run the timer every 30 mins if any log is present
            timer_at(interval, remove_stale_objects, self)
            self.stale_timer_running = true
        end
    end

    function _M.set_check_stale_interval(time)
        interval = time
    end
end


local function total_processed_entries(self)
    local processed_entries = 0
    for _, log_buffer in pairs(self.buffers) do
        processed_entries = processed_entries + log_buffer.processed_entries
    end
    return processed_entries
end

function _M:add_entry(conf, entry, max_pending_entries)
    if max_pending_entries then
        local total_processed_entries_count = total_processed_entries(self)
        if self.total_pushed_entries - total_processed_entries_count > max_pending_entries then
            core.log.error("max pending entries limit exceeded. discarding entry.",
                           " total_pushed_entries: ", self.total_pushed_entries,
                           " total_processed_entries: ", total_processed_entries_count,
                           " max_pending_entries: ", max_pending_entries)
            return
        end
    end
    check_stale(self)

    local log_buffer = self.buffers[conf]
    if not log_buffer then
        return false
    end

    log_buffer:push(entry)
    self.total_pushed_entries = self.total_pushed_entries + 1
    return true
end


function _M:add_entry_to_new_processor(conf, entry, ctx, func, max_pending_entries)
    if max_pending_entries then
        local total_processed_entries_count = total_processed_entries(self)
        if self.total_pushed_entries - total_processed_entries_count > max_pending_entries then
            core.log.error("max pending entries limit exceeded. discarding entry.",
                           " total_pushed_entries: ", self.total_pushed_entries,
                           " total_processed_entries: ", total_processed_entries_count,
                           " max_pending_entries: ", max_pending_entries)
            return
        end
    end
    check_stale(self)

    local config = {
        name = conf.name,
        batch_max_size = conf.batch_max_size,
        max_retry_count = conf.max_retry_count,
        retry_delay = conf.retry_delay,
        buffer_duration = conf.buffer_duration,
        inactive_timeout = conf.inactive_timeout,
        route_id = ctx.var.route_id,
        server_addr = ctx.var.server_addr,
    }

    local log_buffer, err = batch_processor:new(func, config)
    if not log_buffer then
        core.log.error("error when creating the batch processor: ", err)
        return false
    end

    log_buffer:push(entry)
    self.buffers[conf] = log_buffer
    self.total_pushed_entries = self.total_pushed_entries + 1
    return true
end


return _M
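A sketch of the typical calling pattern from a logger plugin, assuming this file lands at apisix/utils/batch-processor-manager.lua as in upstream APISIX; the plugin name, `conf` fields, and the `send_batch` callback are illustrative:

    local bp_manager_mod = require("apisix.utils.batch-processor-manager")

    local batch_processor_manager = bp_manager_mod.new("example logger")

    -- wrap_schema() folds the batch-processor options into the plugin schema
    local schema = batch_processor_manager:wrap_schema({
        type = "object",
        properties = { endpoint = { type = "string" } },
        required = { "endpoint" },
    })

    local function log(conf, ctx)
        local entry = { status = ngx.status, uri = ngx.var.uri }
        if batch_processor_manager:add_entry(conf, entry) then
            return  -- an existing processor for this conf accepted the entry
        end
        -- illustrative flush callback; must return ok[, err[, first_fail]]
        local function send_batch(entries, batch_max_size)
            return true
        end
        batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, send_batch)
    end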
@@ -0,0 +1,235 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local setmetatable = setmetatable
local timer_at = ngx.timer.at
local ipairs = ipairs
local table = table
local now = ngx.now
local type = type
local batch_processor = {}
local batch_processor_mt = {
    __index = batch_processor
}
local execute_func
local create_buffer_timer
local batch_metrics
local prometheus
if ngx.config.subsystem == "http" then
    prometheus = require("apisix.plugins.prometheus.exporter")
end


local schema = {
    type = "object",
    properties = {
        name = {type = "string", default = "log buffer"},
        max_retry_count = {type = "integer", minimum = 0, default = 0},
        retry_delay = {type = "integer", minimum = 0, default = 1},
        buffer_duration = {type = "integer", minimum = 1, default = 60},
        inactive_timeout = {type = "integer", minimum = 1, default = 5},
        batch_max_size = {type = "integer", minimum = 1, default = 1000},
    }
}
batch_processor.schema = schema


local function schedule_func_exec(self, delay, batch)
    local hdl, err = timer_at(delay, execute_func, self, batch)
    if not hdl then
        core.log.error("failed to create process timer: ", err)
        return
    end
end


local function set_metrics(self, count)
    -- add batch metric for every route
    if batch_metrics and self.name and self.route_id and self.server_addr then
        self.label = {self.name, self.route_id, self.server_addr}
        batch_metrics:set(count, self.label)
    end
end


local function slice_batch(batch, n)
    local slice = {}
    local idx = 1
    for i = n or 1, #batch do
        slice[idx] = batch[i]
        idx = idx + 1
    end
    return slice
end


function execute_func(premature, self, batch)
    if premature then
        return
    end

    -- When "err" is returned together with a valid "first_fail", the batch
    -- processor assumes entries 1 .. first_fail - 1 were consumed successfully,
    -- and reschedules the job for entries first_fail .. #entries according to
    -- the current retry policy.
    local ok, err, first_fail = self.func(batch.entries, self.batch_max_size)
    if not ok then
        if first_fail then
            core.log.error("Batch Processor[", self.name, "] failed to process entries [",
                           #batch.entries + 1 - first_fail, "/", #batch.entries, "]: ", err)
            batch.entries = slice_batch(batch.entries, first_fail)
            self.processed_entries = self.processed_entries + first_fail - 1
        else
            core.log.error("Batch Processor[", self.name,
                           "] failed to process entries: ", err)
        end

        batch.retry_count = batch.retry_count + 1
        if batch.retry_count <= self.max_retry_count and #batch.entries > 0 then
            schedule_func_exec(self, self.retry_delay, batch)
        else
            self.processed_entries = self.processed_entries + #batch.entries
            core.log.error("Batch Processor[", self.name, "] exceeded ",
                           "the max_retry_count[", batch.retry_count,
                           "] dropping the entries")
        end
        return
    end
    self.processed_entries = self.processed_entries + #batch.entries
    core.log.debug("Batch Processor[", self.name,
                   "] successfully processed the entries")
end


local function flush_buffer(premature, self)
    if premature then
        return
    end

    if now() - self.last_entry_t >= self.inactive_timeout or
       now() - self.first_entry_t >= self.buffer_duration
    then
        core.log.debug("Batch Processor[", self.name, "] buffer ",
                       "duration exceeded, activating buffer flush")
        self:process_buffer()
        self.is_timer_running = false
        return
    end

    -- buffer duration did not exceed or the buffer is active,
    -- extending the timer
    core.log.debug("Batch Processor[", self.name, "] extending buffer timer")
    create_buffer_timer(self)
end


function create_buffer_timer(self)
    local hdl, err = timer_at(self.inactive_timeout, flush_buffer, self)
    if not hdl then
        core.log.error("failed to create buffer timer: ", err)
        return
    end
    self.is_timer_running = true
end


function batch_processor:new(func, config)
    local ok, err = core.schema.check(schema, config)
    if not ok then
        return nil, err
    end

    if type(func) ~= "function" then
        return nil, "Invalid argument, arg #1 must be a function"
    end

    local processor = {
        func = func,
        buffer_duration = config.buffer_duration,
        inactive_timeout = config.inactive_timeout,
        max_retry_count = config.max_retry_count,
        batch_max_size = config.batch_max_size,
        retry_delay = config.retry_delay,
        name = config.name,
        batch_to_process = {},
        entry_buffer = {entries = {}, retry_count = 0},
        is_timer_running = false,
        first_entry_t = 0,
        last_entry_t = 0,
        route_id = config.route_id,
        server_addr = config.server_addr,
        processed_entries = 0
    }

    return setmetatable(processor, batch_processor_mt)
end

function batch_processor:push(entry)
    -- if the batch size is one then immediately send for processing
    if self.batch_max_size == 1 then
        local batch = {entries = {entry}, retry_count = 0}
        schedule_func_exec(self, 0, batch)
        return
    end

    if prometheus and prometheus.get_prometheus() and not batch_metrics and self.name
       and self.route_id and self.server_addr then
        batch_metrics = prometheus.get_prometheus():gauge("batch_process_entries",
                                                          "batch process remaining entries",
                                                          {"name", "route_id", "server_addr"})
    end

    local entries = self.entry_buffer.entries
    table.insert(entries, entry)
    set_metrics(self, #entries)

    if #entries == 1 then
        self.first_entry_t = now()
    end
    self.last_entry_t = now()

    if self.batch_max_size <= #entries then
        core.log.debug("Batch Processor[", self.name,
                       "] batch max size has exceeded")
        self:process_buffer()
    end

    if not self.is_timer_running then
        create_buffer_timer(self)
    end
end


function batch_processor:process_buffer()
    -- If entries are present in the buffer, move the entries to processing
    if #self.entry_buffer.entries > 0 then
        core.log.debug("transferring buffer entries to processing pipe line, ",
                       "buffercount[", #self.entry_buffer.entries, "]")
        self.batch_to_process[#self.batch_to_process + 1] = self.entry_buffer
        self.entry_buffer = {entries = {}, retry_count = 0}
        set_metrics(self, 0)
    end

    for _, batch in ipairs(self.batch_to_process) do
        schedule_func_exec(self, 0, batch)
    end

    self.batch_to_process = {}
end


return batch_processor
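A standalone sketch of driving the processor directly (the `consume` callback and its partial-failure return are illustrative):

    local batch_processor = require("apisix.utils.batch-processor")

    local function consume(entries, batch_max_size)
        -- on partial failure, return false, err, first_fail so that
        -- entries[first_fail .. #entries] are rescheduled for retry
        return true
    end

    local processor, err = batch_processor:new(consume, {
        name = "example buffer",
        batch_max_size = 100,
        inactive_timeout = 5,
        buffer_duration = 60,
        max_retry_count = 2,
        retry_delay = 1,
    })
    if processor then
        processor:push({ msg = "hello" })
    end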
@@ -0,0 +1,112 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local pcall = pcall
local zlib = require("ffi-zlib")
local str_buffer = require("string.buffer")
local is_br_libs_loaded, brotli = pcall(require, "brotli")
local content_decode_funcs = {}
local _M = {}


local function inflate_gzip(data)
    local inputs = str_buffer.new():set(data)
    local outputs = str_buffer.new()

    local read_inputs = function(size)
        local data = inputs:get(size)
        if data == "" then
            return nil
        end
        return data
    end

    local write_outputs = function(data)
        return outputs:put(data)
    end

    local ok, err = zlib.inflateGzip(read_inputs, write_outputs)
    if not ok then
        return nil, "inflate gzip err: " .. err
    end

    return outputs:get()
end
content_decode_funcs.gzip = inflate_gzip


local function brotli_stream_decode(read_inputs, write_outputs)
    -- read 64 KiB of data at a time
    local read_size = 64 * 1024
    local decompressor = brotli.decompressor:new()

    local chunk, ok, res
    repeat
        chunk = read_inputs(read_size)
        if chunk then
            ok, res = pcall(function()
                return decompressor:decompress(chunk)
            end)
        else
            ok, res = pcall(function()
                return decompressor:finish()
            end)
        end
        if not ok then
            return false, res
        end
        write_outputs(res)
    until not chunk

    return true, nil
end


local function brotli_decode(data)
    local inputs = str_buffer.new():set(data)
    local outputs = str_buffer.new()

    local read_inputs = function(size)
        local data = inputs:get(size)
        if data == "" then
            return nil
        end
        return data
    end

    local write_outputs = function(data)
        return outputs:put(data)
    end

    local ok, err = brotli_stream_decode(read_inputs, write_outputs)
    if not ok then
        return nil, "brotli decode err: " .. err
    end

    return outputs:get()
end

if is_br_libs_loaded then
    content_decode_funcs.br = brotli_decode
end


function _M.dispatch_decoder(response_encoding)
    return content_decode_funcs[response_encoding]
end


return _M
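A sketch of how a caller might decode a captured response body by its Content-Encoding (the `maybe_decode` helper is hypothetical; log-util.lua below uses the same dispatch):

    local content_decode = require("apisix.utils.content-decode")

    -- hypothetical helper: decode raw_body if the encoding is supported
    local function maybe_decode(encoding, raw_body)
        local decoder = content_decode.dispatch_decoder(encoding)
        if not decoder then
            return raw_body  -- pass through unsupported encodings unchanged
        end
        local body, err = decoder(raw_body)
        if not body then
            ngx.log(ngx.WARN, "decode failed: ", err)
            return raw_body
        end
        return body
    end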
@@ -0,0 +1,130 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local core = require("apisix.core")
local type = type
local setmetatable = setmetatable

local ngx_update_time = ngx.update_time
local ngx_time = ngx.time
local ngx_encode_args = ngx.encode_args

local http = require("resty.http")
local jwt = require("resty.jwt")


local function get_timestamp()
    ngx_update_time()
    return ngx_time()
end


local _M = {}


function _M.generate_access_token(self)
    if not self.access_token or get_timestamp() > self.access_token_expire_time - 60 then
        self:refresh_access_token()
    end
    return self.access_token
end


function _M.refresh_access_token(self)
    local http_new = http.new()
    local res, err = http_new:request_uri(self.token_uri, {
        ssl_verify = self.ssl_verify,
        method = "POST",
        body = ngx_encode_args({
            grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer",
            assertion = self:generate_jwt_token()
        }),
        headers = {
            ["Content-Type"] = "application/x-www-form-urlencoded",
        },
    })

    if not res then
        core.log.error("failed to refresh google oauth access token, ", err)
        return
    end

    if res.status ~= 200 then
        core.log.error("failed to refresh google oauth access token: ", res.body)
        return
    end

    res, err = core.json.decode(res.body)
    if not res then
        core.log.error("failed to parse google oauth response data: ", err)
        return
    end

    self.access_token = res.access_token
    self.access_token_type = res.token_type
    self.access_token_expire_time = get_timestamp() + res.expires_in
end


function _M.generate_jwt_token(self)
    local payload = core.json.encode({
        iss = self.client_email,
        aud = self.token_uri,
        scope = self.scope,
        iat = get_timestamp(),
        exp = get_timestamp() + (60 * 60)
    })

    local jwt_token = jwt:sign(self.private_key, {
        header = { alg = "RS256", typ = "JWT" },
        payload = payload,
    })

    return jwt_token
end


function _M.new(config, ssl_verify)
    local oauth = {
        client_email = config.client_email,
        private_key = config.private_key,
        project_id = config.project_id,
        token_uri = config.token_uri or "https://oauth2.googleapis.com/token",
        auth_uri = config.auth_uri or "https://accounts.google.com/o/oauth2/auth",
        entries_uri = config.entries_uri,
        access_token = nil,
        access_token_type = nil,
        access_token_expire_time = 0,
    }

    oauth.ssl_verify = ssl_verify

    if config.scope then
        if type(config.scope) == "string" then
            oauth.scope = config.scope
        end

        if type(config.scope) == "table" then
            oauth.scope = core.table.concat(config.scope, " ")
        end
    end

    return setmetatable(oauth, { __index = _M })
end


return _M
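A sketch of obtaining a token, assuming this module lives at apisix/utils/google-cloud-oauth.lua (this hunk carries no file header); the service-account fields are placeholders, not real credentials:

    local google_oauth = require("apisix.utils.google-cloud-oauth")  -- assumed path

    local oauth = google_oauth.new({
        client_email = "sa@example-project.iam.gserviceaccount.com",  -- placeholder
        private_key = "-----BEGIN PRIVATE KEY-----\n...",             -- placeholder
        project_id = "example-project",
        scope = { "https://www.googleapis.com/auth/logging.write" },
    }, true)

    -- nil if the refresh request failed; failures are logged by the module
    local token = oauth:generate_access_token()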
CloudronPackages/APISIX/apisix-source/apisix/utils/log-util.lua (new file, 403 lines)
@@ -0,0 +1,403 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local expr = require("resty.expr.v1")
local content_decode = require("apisix.utils.content-decode")
local ngx = ngx
local pairs = pairs
local ngx_now = ngx.now
local ngx_header = ngx.header
local os_date = os.date
local str_byte = string.byte
local str_sub = string.sub
local math_floor = math.floor
local ngx_update_time = ngx.update_time
local req_get_body_data = ngx.req.get_body_data
local is_http = ngx.config.subsystem == "http"
local req_get_body_file = ngx.req.get_body_file
local MAX_REQ_BODY = 524288 -- 512 KiB
local MAX_RESP_BODY = 524288 -- 512 KiB
local io = io

local lru_log_format = core.lrucache.new({
    ttl = 300, count = 512
})

local _M = {}


local function get_request_body(max_bytes)
    local req_body = req_get_body_data()
    if req_body then
        if max_bytes and #req_body >= max_bytes then
            req_body = str_sub(req_body, 1, max_bytes)
        end
        return req_body
    end

    local file_name = req_get_body_file()
    if not file_name then
        return nil
    end

    core.log.info("attempt to read body from file: ", file_name)

    local f, err = io.open(file_name, 'r')
    if not f then
        return nil, "fail to open file " .. err
    end

    req_body = f:read(max_bytes)
    f:close()

    return req_body
end


local function gen_log_format(format)
    local log_format = {}
    for k, var_name in pairs(format) do
        if var_name:byte(1, 1) == str_byte("$") then
            log_format[k] = {true, var_name:sub(2)}
        else
            log_format[k] = {false, var_name}
        end
    end
    core.log.info("log_format: ", core.json.delay_encode(log_format))
    return log_format
end


local function get_custom_format_log(ctx, format, max_req_body_bytes)
    local log_format = lru_log_format(format or "", nil, gen_log_format, format)
    local entry = core.table.new(0, core.table.nkeys(log_format))
    for k, var_attr in pairs(log_format) do
        if var_attr[1] then
            local key = var_attr[2]
            if key == "request_body" then
                local max_req_body_bytes = max_req_body_bytes or MAX_REQ_BODY
                local req_body, err = get_request_body(max_req_body_bytes)
                if err then
                    core.log.error("fail to get request body: ", err)
                else
                    entry[k] = req_body
                end
            else
                entry[k] = ctx.var[var_attr[2]]
            end
        else
            entry[k] = var_attr[2]
        end
    end

    local matched_route = ctx.matched_route and ctx.matched_route.value
    if matched_route then
        entry.service_id = matched_route.service_id
        entry.route_id = matched_route.id
    end
    return entry
end
-- export the log getter so we can mock in tests
_M.get_custom_format_log = get_custom_format_log


-- for test
function _M.inject_get_custom_format_log(f)
    get_custom_format_log = f
    _M.get_custom_format_log = f
end


local function latency_details_in_ms(ctx)
    local latency = (ngx_now() - ngx.req.start_time()) * 1000
    local upstream_latency, apisix_latency = nil, latency

    if ctx.var.upstream_response_time then
        upstream_latency = ctx.var.upstream_response_time * 1000
        apisix_latency = apisix_latency - upstream_latency

        -- The latency might be negative, as Nginx uses different time measurements in
        -- different metrics.
        -- See https://github.com/apache/apisix/issues/5146#issuecomment-928919399
        if apisix_latency < 0 then
            apisix_latency = 0
        end
    end

    return latency, upstream_latency, apisix_latency
end
_M.latency_details_in_ms = latency_details_in_ms


local function get_full_log(ngx, conf)
    local ctx = ngx.ctx.api_ctx
    local var = ctx.var
    local service_id
    local route_id
    local url = var.scheme .. "://" .. var.host .. ":" .. var.server_port
                .. var.request_uri
    local matched_route = ctx.matched_route and ctx.matched_route.value

    if matched_route then
        service_id = matched_route.service_id or ""
        route_id = matched_route.id
    else
        service_id = var.host
    end

    local consumer
    if ctx.consumer then
        consumer = {
            username = ctx.consumer.username
        }
    end

    local latency, upstream_latency, apisix_latency = latency_details_in_ms(ctx)

    local log = {
        request = {
            url = url,
            uri = var.request_uri,
            method = ngx.req.get_method(),
            headers = ngx.req.get_headers(),
            querystring = ngx.req.get_uri_args(),
            size = var.request_length
        },
        response = {
            status = ngx.status,
            headers = ngx.resp.get_headers(),
            size = var.bytes_sent
        },
        server = {
            hostname = core.utils.gethostname(),
            version = core.version.VERSION
        },
        upstream = var.upstream_addr,
        service_id = service_id,
        route_id = route_id,
        consumer = consumer,
        client_ip = core.request.get_remote_client_ip(ngx.ctx.api_ctx),
        start_time = ngx.req.start_time() * 1000,
        latency = latency,
        upstream_latency = upstream_latency,
        apisix_latency = apisix_latency
    }

    if ctx.resp_body then
        log.response.body = ctx.resp_body
    end

    if conf.include_req_body then
        local log_request_body = true

        if conf.include_req_body_expr then
            if not conf.request_expr then
                local request_expr, err = expr.new(conf.include_req_body_expr)
                if not request_expr then
                    core.log.error('generate request expr err ' .. err)
                    return log
                end
                conf.request_expr = request_expr
            end

            local result = conf.request_expr:eval(ctx.var)

            if not result then
                log_request_body = false
            end
        end

        if log_request_body then
            local max_req_body_bytes = conf.max_req_body_bytes or MAX_REQ_BODY
            local body, err = get_request_body(max_req_body_bytes)
            if err then
                core.log.error("fail to get request body: ", err)
                return
            end
            log.request.body = body
        end
    end

    return log
end
_M.get_full_log = get_full_log


-- for test
function _M.inject_get_full_log(f)
    get_full_log = f
    _M.get_full_log = f
end


local function is_match(match, ctx)
    local match_result
    for _, m in pairs(match) do
        local expr, _ = expr.new(m)
        match_result = expr:eval(ctx.var)
        if match_result then
            break
        end
    end

    return match_result
end


function _M.get_log_entry(plugin_name, conf, ctx)
    -- If the "match" configuration is set and the matching conditions are not met,
    -- then do not log the message.
    if conf.match and not is_match(conf.match, ctx) then
        return
    end

    local metadata = plugin.plugin_metadata(plugin_name)
    core.log.info("metadata: ", core.json.delay_encode(metadata))

    local entry
    local customized = false

    local has_meta_log_format = metadata and metadata.value.log_format
                                and core.table.nkeys(metadata.value.log_format) > 0

    if conf.log_format or has_meta_log_format then
        customized = true
        entry = get_custom_format_log(ctx, conf.log_format or metadata.value.log_format,
                                      conf.max_req_body_bytes)
    else
        if is_http then
            entry = get_full_log(ngx, conf)
        else
            -- get_full_log doesn't work in stream
            core.log.error(plugin_name, "'s log_format is not set")
        end
    end

    return entry, customized
end


function _M.get_req_original(ctx, conf)
    local data = {
        ctx.var.request, "\r\n"
    }
    for k, v in pairs(ngx.req.get_headers()) do
        core.table.insert_tail(data, k, ": ", v, "\r\n")
    end
    core.table.insert(data, "\r\n")

    if conf.include_req_body then
        local max_req_body_bytes = conf.max_req_body_bytes or MAX_REQ_BODY
        local req_body = get_request_body(max_req_body_bytes)
        core.table.insert(data, req_body)
    end

    return core.table.concat(data, "")
end


function _M.check_log_schema(conf)
    if conf.include_req_body_expr then
        local ok, err = expr.new(conf.include_req_body_expr)
        if not ok then
            return nil, "failed to validate the 'include_req_body_expr' expression: " .. err
        end
    end
    if conf.include_resp_body_expr then
        local ok, err = expr.new(conf.include_resp_body_expr)
        if not ok then
            return nil, "failed to validate the 'include_resp_body_expr' expression: " .. err
        end
    end
    return true, nil
end


function _M.collect_body(conf, ctx)
    if conf.include_resp_body then
        local log_response_body = true

        if conf.include_resp_body_expr then
            if not conf.response_expr then
                local response_expr, err = expr.new(conf.include_resp_body_expr)
                if not response_expr then
                    core.log.error('generate response expr err ' .. err)
                    return
                end
                conf.response_expr = response_expr
            end

            if ctx.res_expr_eval_result == nil then
                ctx.res_expr_eval_result = conf.response_expr:eval(ctx.var)
            end

            if not ctx.res_expr_eval_result then
                log_response_body = false
            end
        end

        if log_response_body then
            local max_resp_body_bytes = conf.max_resp_body_bytes or MAX_RESP_BODY

            if ctx._resp_body_bytes and ctx._resp_body_bytes >= max_resp_body_bytes then
                return
            end
            local final_body = core.response.hold_body_chunk(ctx, true, max_resp_body_bytes)
            if not final_body then
                return
            end

            local response_encoding = ngx_header["Content-Encoding"]
            if not response_encoding then
                ctx.resp_body = final_body
                return
            end

            local decoder = content_decode.dispatch_decoder(response_encoding)
            if not decoder then
                core.log.warn("unsupported compression encoding type: ",
                              response_encoding)
                ctx.resp_body = final_body
                return
            end

            local decoded_body, err = decoder(final_body)
            if err ~= nil then
                core.log.warn("try decode compressed data err: ", err)
                ctx.resp_body = final_body
                return
            end

            ctx.resp_body = decoded_body
        end
    end
end


function _M.get_rfc3339_zulu_timestamp(timestamp)
    ngx_update_time()
    local now = timestamp or ngx_now()
    local second = math_floor(now)
    local millisecond = math_floor((now - second) * 1000)
    return os_date("!%Y-%m-%dT%T.", second) .. core.string.format("%03dZ", millisecond)
end


return _M
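A sketch of the usual call from a logger plugin's log phase (the plugin name and conf shape are illustrative):

    local log_util = require("apisix.utils.log-util")

    local function log(conf, ctx)
        local entry, customized = log_util.get_log_entry("example-logger", conf, ctx)
        if not entry then
            return  -- match conditions unmet, or no usable format in stream mode
        end
        -- customized is true when conf.log_format or plugin metadata shaped the entry
    end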
@@ -0,0 +1,81 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local policy_to_additional_properties = {
    redis = {
        properties = {
            redis_host = {
                type = "string", minLength = 2
            },
            redis_port = {
                type = "integer", minimum = 1, default = 6379,
            },
            redis_username = {
                type = "string", minLength = 1,
            },
            redis_password = {
                type = "string", minLength = 0,
            },
            redis_database = {
                type = "integer", minimum = 0, default = 0,
            },
            redis_timeout = {
                type = "integer", minimum = 1, default = 1000,
            },
            redis_ssl = {
                type = "boolean", default = false,
            },
            redis_ssl_verify = {
                type = "boolean", default = false,
            },
        },
        required = {"redis_host"},
    },
    ["redis-cluster"] = {
        properties = {
            redis_cluster_nodes = {
                type = "array",
                minItems = 1,
                items = {
                    type = "string", minLength = 2, maxLength = 100
                },
            },
            redis_password = {
                type = "string", minLength = 0,
            },
            redis_timeout = {
                type = "integer", minimum = 1, default = 1000,
            },
            redis_cluster_name = {
                type = "string",
            },
            redis_cluster_ssl = {
                type = "boolean", default = false,
            },
            redis_cluster_ssl_verify = {
                type = "boolean", default = false,
            },
        },
        required = {"redis_cluster_nodes", "redis_cluster_name"},
    },
}

local _M = {
    schema = policy_to_additional_properties
}

return _M
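A sketch of how a rate-limiting plugin might fold these policy properties into its own schema (the module path apisix/utils/redis-schema.lua is assumed, since this hunk carries no file header; the plugin schema is illustrative):

    local redis_schema = require("apisix.utils.redis-schema")  -- assumed path

    local plugin_schema = {
        type = "object",
        properties = {
            count = { type = "integer", minimum = 1 },
        },
        required = { "count" },
    }
    -- merge the redis connection fields alongside the plugin's own fields
    for name, prop in pairs(redis_schema.schema.redis.properties) do
        plugin_schema.properties[name] = prop
    end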
CloudronPackages/APISIX/apisix-source/apisix/utils/redis.lua (new file, 74 lines)
@@ -0,0 +1,74 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local redis_new = require("resty.redis").new
local core = require("apisix.core")


local _M = {version = 0.1}

local function redis_cli(conf)
    local red = redis_new()
    local timeout = conf.redis_timeout or 1000 -- default 1sec

    red:set_timeouts(timeout, timeout, timeout)

    local sock_opts = {
        ssl = conf.redis_ssl,
        ssl_verify = conf.redis_ssl_verify
    }

    local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379, sock_opts)
    if not ok then
        core.log.error("redis connect error, error: ", err)
        return false, err
    end

    local count
    count, err = red:get_reused_times()
    if 0 == count then
        if conf.redis_password and conf.redis_password ~= '' then
            local ok, err
            if conf.redis_username then
                ok, err = red:auth(conf.redis_username, conf.redis_password)
            else
                ok, err = red:auth(conf.redis_password)
            end
            if not ok then
                return nil, err
            end
        end

        -- select db
        if conf.redis_database ~= 0 then
            local ok, err = red:select(conf.redis_database)
            if not ok then
                return false, "failed to change redis db, err: " .. err
            end
        end
    elseif err then
        -- core.log.info(" err: ", err)
        return nil, err
    end
    return red, nil
end


function _M.new(conf)
    return redis_cli(conf)
end

return _M
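A usage sketch; the caller owns the returned connection and should hand it back to the cosocket pool when done (the `fetch` helper and keepalive values are illustrative):

    local redis_util = require("apisix.utils.redis")

    local function fetch(key)
        local red, err = redis_util.new({
            redis_host = "127.0.0.1",
            redis_port = 6379,
            redis_timeout = 1000,
            redis_database = 0,
        })
        if not red then
            return nil, err
        end

        local val = red:get(key)
        red:set_keepalive(10000, 100)  -- return the connection to the pool
        return val
    end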
@@ -0,0 +1,60 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local rediscluster = require("resty.rediscluster")
local core = require("apisix.core")
local ipairs = ipairs

local _M = {version = 0.1}

local function new_redis_cluster(conf, dict_name)
    local config = {
        name = conf.redis_cluster_name,
        serv_list = {},
        read_timeout = conf.redis_timeout,
        auth = conf.redis_password,
        dict_name = dict_name,
        connect_opts = {
            ssl = conf.redis_cluster_ssl,
            ssl_verify = conf.redis_cluster_ssl_verify,
        }
    }

    for i, conf_item in ipairs(conf.redis_cluster_nodes) do
        local host, port, err = core.utils.parse_addr(conf_item)
        if err then
            return nil, "failed to parse address: " .. conf_item
                        .. " err: " .. err
        end

        config.serv_list[i] = {ip = host, port = port}
    end

    local red_cli, err = rediscluster:new(config)
    if not red_cli then
        return nil, "failed to new redis cluster: " .. err
    end

    return red_cli
end


function _M.new(conf, dict_name)
    return new_redis_cluster(conf, dict_name)
end


return _M
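A sketch of creating a cluster client; `dict_name` must name a lua_shared_dict declared in nginx.conf, and the module path apisix/utils/rediscluster.lua is assumed (this hunk carries no file header):

    local redis_cluster = require("apisix.utils.rediscluster")  -- assumed path

    local function get_cluster_client()
        local red, err = redis_cluster.new({
            redis_cluster_name = "example-cluster",
            redis_cluster_nodes = { "127.0.0.1:7000", "127.0.0.1:7001" },
            redis_timeout = 1000,
        }, "redis-cluster-slot-lock")  -- shared dict used by resty.rediscluster
        if not red then
            return nil, err
        end
        return red
    end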
CloudronPackages/APISIX/apisix-source/apisix/utils/rfc5424.lua (new file, 114 lines)
@@ -0,0 +1,114 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local LOG_EMERG   = 0  -- system is unusable
local LOG_ALERT   = 1  -- action must be taken immediately
local LOG_CRIT    = 2  -- critical conditions
local LOG_ERR     = 3  -- error conditions
local LOG_WARNING = 4  -- warning conditions
local LOG_NOTICE  = 5  -- normal but significant condition
local LOG_INFO    = 6  -- informational
local LOG_DEBUG   = 7  -- debug-level messages

local LOG_KERN     = 0  -- kernel messages
local LOG_USER     = 1  -- random user-level messages
local LOG_MAIL     = 2  -- mail system
local LOG_DAEMON   = 3  -- system daemons
local LOG_AUTH     = 4  -- security/authorization messages
local LOG_SYSLOG   = 5  -- messages generated internally by syslogd
local LOG_LPR      = 6  -- line printer subsystem
local LOG_NEWS     = 7  -- network news subsystem
local LOG_UUCP     = 8  -- UUCP subsystem
local LOG_CRON     = 9  -- clock daemon
local LOG_AUTHPRIV = 10 -- security/authorization messages (private)
local LOG_FTP      = 11 -- FTP daemon
local LOG_LOCAL0   = 16 -- reserved for local use
local LOG_LOCAL1   = 17 -- reserved for local use
local LOG_LOCAL2   = 18 -- reserved for local use
local LOG_LOCAL3   = 19 -- reserved for local use
local LOG_LOCAL4   = 20 -- reserved for local use
local LOG_LOCAL5   = 21 -- reserved for local use
local LOG_LOCAL6   = 22 -- reserved for local use
local LOG_LOCAL7   = 23 -- reserved for local use

local Facility = {
    KERN = LOG_KERN,
    USER = LOG_USER,
    MAIL = LOG_MAIL,
    DAEMON = LOG_DAEMON,
    AUTH = LOG_AUTH,
    SYSLOG = LOG_SYSLOG,
    LPR = LOG_LPR,
    NEWS = LOG_NEWS,
    UUCP = LOG_UUCP,
    CRON = LOG_CRON,
    AUTHPRIV = LOG_AUTHPRIV,
    FTP = LOG_FTP,
    LOCAL0 = LOG_LOCAL0,
    LOCAL1 = LOG_LOCAL1,
    LOCAL2 = LOG_LOCAL2,
    LOCAL3 = LOG_LOCAL3,
    LOCAL4 = LOG_LOCAL4,
    LOCAL5 = LOG_LOCAL5,
    LOCAL6 = LOG_LOCAL6,
    LOCAL7 = LOG_LOCAL7,
}

local Severity = {
    EMEGR = LOG_EMERG,
    ALERT = LOG_ALERT,
    CRIT = LOG_CRIT,
    ERR = LOG_ERR,
    WARNING = LOG_WARNING,
    NOTICE = LOG_NOTICE,
    INFO = LOG_INFO,
    DEBUG = LOG_DEBUG,
}

local log_util = require("apisix.utils.log-util")
local ipairs = ipairs
local str_format = string.format

local _M = { version = 0.1 }


function _M.encode(facility, severity, hostname, appname, pid, msg, structured_data)
    local pri = (Facility[facility] * 8 + Severity[severity])
    local t = log_util.get_rfc3339_zulu_timestamp()
    if not hostname then
        hostname = "-"
    end

    if not appname then
        appname = "-"
    end

    local structured_data_str = "-"

    if structured_data then
        structured_data_str = "[logservice"
        for _, sd_param in ipairs(structured_data) do
            structured_data_str = structured_data_str .. " " .. sd_param.name
                                  .. "=\"" .. sd_param.value .. "\""
        end
        structured_data_str = structured_data_str .. "]"
    end

    return str_format("<%d>1 %s %s %s %d - %s %s\n", pri, t, hostname,
                      appname, pid, structured_data_str, msg)
end

return _M
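A sketch of producing one syslog line with the encoder above (values illustrative):

    local rfc5424 = require("apisix.utils.rfc5424")

    local structured_data = {
        { name = "project", value = "example" },
    }
    -- pri = SYSLOG (5) * 8 + INFO (6) = 46, so the line starts with "<46>1 ..."
    local line = rfc5424.encode("SYSLOG", "INFO", "gateway-1", "apisix",
                                ngx.worker.pid(), "hello syslog", structured_data)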
@@ -0,0 +1,34 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local resty_router = require("resty.radixtree")


local _M = {}

do
    local router_opts = {
        no_param_match = true
    }

    function _M.new(routes)
        return resty_router.new(routes, router_opts)
    end

end


return _M
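A dispatch sketch; the route shape follows lua-resty-radixtree, the handler is illustrative, and the module path apisix/utils/router.lua is assumed (no file header in this hunk):

    local router_util = require("apisix.utils.router")  -- assumed path

    local router = router_util.new({
        {
            paths = { "/admin/*" },
            handler = function(api_ctx)
                api_ctx.matched_admin = true
            end,
        },
    })

    local api_ctx = {}
    router:dispatch("/admin/routes", { method = "GET" }, api_ctx)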
CloudronPackages/APISIX/apisix-source/apisix/utils/upstream.lua (new file, 133 lines)
@@ -0,0 +1,133 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local ipmatcher = require("resty.ipmatcher")
local ngx_now = ngx.now
local ipairs = ipairs
local type = type
local tostring = tostring


local _M = {}


local function sort_by_key_host(a, b)
    return a.host < b.host
end


local function compare_upstream_node(up_conf, new_t)
    if up_conf == nil then
        return false
    end

    -- fast path
    local old_t = up_conf.nodes
    if old_t == new_t then
        return true
    end

    if type(old_t) ~= "table" then
        return false
    end

    -- slow path
    core.log.debug("compare upstream nodes by value, ",
                   "old: ", tostring(old_t), " ", core.json.delay_encode(old_t, true),
                   "new: ", tostring(new_t), " ", core.json.delay_encode(new_t, true))

    if up_conf.original_nodes then
        -- if original_nodes is set, it means that the upstream nodes
        -- are changed by `fill_node_info`, so we need to compare the new nodes with the
        -- original nodes.
        old_t = up_conf.original_nodes
    end

    if #new_t ~= #old_t then
        return false
    end

    core.table.sort(old_t, sort_by_key_host)
    core.table.sort(new_t, sort_by_key_host)

    for i = 1, #new_t do
        local new_node = new_t[i]
        local old_node = old_t[i]
        for _, name in ipairs({"host", "port", "weight", "priority", "metadata"}) do
            if new_node[name] ~= old_node[name] then
                return false
            end
        end
    end

    return true
end
_M.compare_upstream_node = compare_upstream_node


local function parse_domain_for_nodes(nodes)
    local new_nodes = core.table.new(#nodes, 0)
    for _, node in ipairs(nodes) do
        local host = node.host
        if not ipmatcher.parse_ipv4(host) and
           not ipmatcher.parse_ipv6(host) then
            local ip, err = core.resolver.parse_domain(host)
            if ip then
                local new_node = core.table.clone(node)
                new_node.host = ip
                new_node.domain = host
                core.table.insert(new_nodes, new_node)
            end

            if err then
                core.log.error("dns resolver domain: ", host, " error: ", err)
            end
        else
            core.table.insert(new_nodes, node)
        end
    end
    return new_nodes
end
_M.parse_domain_for_nodes = parse_domain_for_nodes


function _M.parse_domain_in_up(up)
    local nodes = up.value.nodes
    local new_nodes, err = parse_domain_for_nodes(nodes)
    if not new_nodes then
        return nil, err
    end

    local ok = compare_upstream_node(up.dns_value, new_nodes)
    if ok then
        return up
    end

    if not up.orig_modifiedIndex then
        up.orig_modifiedIndex = up.modifiedIndex
    end
    up.modifiedIndex = up.orig_modifiedIndex .. "#" .. ngx_now()

    up.dns_value = core.table.clone(up.value)
    up.dns_value.nodes = new_nodes
    core.log.info("resolve upstream which contain domain: ",
                  core.json.delay_encode(up, true))
    return up
end


return _M
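A sketch of the node comparison above (the node tables follow the shape the module expects):

    local upstream_util = require("apisix.utils.upstream")

    local old_conf = {
        nodes = {
            { host = "10.0.0.1", port = 80, weight = 1 },
        },
    }
    local new_nodes = {
        { host = "10.0.0.1", port = 80, weight = 2 },  -- weight changed
    }
    -- false: the weight differs, so the upstream needs rebuilding
    local same = upstream_util.compare_upstream_node(old_conf, new_nodes)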