local function peer_ok(ctx, is_backup, id, peer)
    debug("peer ", peer.name, " was checked to be ok")

    local u = ctx.upstream
    local dict = ctx.dict

    -- bump the consecutive-success counter for this peer
    local key = gen_peer_key("ok:", u, is_backup, id)
    local succ, err = dict:get(key)
    if not succ then
        if err then
            errlog("failed to get peer ok key: ", err)
            return
        end
        succ = 1

        -- below may have a race condition, but it is fine for our
        -- purpose here.
        local ok, err = dict:set(key, 1)
        if not ok then
            errlog("failed to set peer ok key: ", err)
        end
    else
        succ = succ + 1
        local ok, err = dict:incr(key, 1)
        if not ok then
            errlog("failed to incr peer ok key: ", err)
        end
    end

    -- on the first success, reset the consecutive-failure counter
    if succ == 1 then
        key = gen_peer_key("nok:", u, is_backup, id)
        local fails, err = dict:get(key)
        if not fails or fails == 0 then
            if err then
                errlog("failed to get peer nok key: ", err)
                return
            end
        else
            local ok, err = dict:set(key, 0)
            if not ok then
                errlog("failed to set peer nok key: ", err)
            end
        end
    end

    -- bring the peer back up once it has succeeded ctx.rise times in a row
    if peer.down and succ >= ctx.rise then
        warn("peer ", peer.name, " is turned up after ", succ,
             " success(es)")
        peer.down = nil
        set_peer_down_globally(ctx, is_backup, id, nil)
    end
end
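The helpers used above are not shown in the snippet; for context, here is a sketch of what they do, based on the shared-dict key scheme visible here ("ok:" and "nok:" prefixes per upstream and peer id) and assuming a "d:" prefix for the global down flag — a paraphrase, not the library's verbatim code:

    -- sketch: build a per-peer shared-dict key, e.g. "ok:<upstream>:p<id>"
    -- ("b" marks backup peers, "p" primary ones)
    local function gen_peer_key(prefix, u, is_backup, id)
        if is_backup then
            return prefix .. u .. ":b" .. id
        end
        return prefix .. u .. ":p" .. id
    end

    -- sketch: record the down state in the shared dict so that every
    -- nginx worker sees the same view of the peer
    local function set_peer_down_globally(ctx, is_backup, id, value)
        local key = gen_peer_key("d:", ctx.upstream, is_backup, id)
        local ok, err = ctx.dict:set(key, value)
        if not ok then
            errlog("failed to set peer down state: ", err)
        end
    end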
On Thursday, February 4, 2016 at 6:00:26 PM UTC+8, Guanglin Lv wrote:
On Tuesday, February 2, 2016 at 5:13:08 PM UTC+8, Kenvin wrote:
When I configure a health check for a single upstream with OpenResty, the reported status is correct. But when I use multiple upstreams, the status is always "up". Can anyone explain?
I have run into something similar before; it is usually caused by reloading nginx. You can flush the lua_shared_dict in the init_by_lua phase, but then the statistics collected so far are lost;
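For illustration, a minimal init_by_lua_block along those lines might look like this (assuming the checker's lua_shared_dict is named "healthcheck" — adjust to your own config):

    init_by_lua_block {
        -- assumption: "healthcheck" is the lua_shared_dict the health
        -- checker stores its state in; flush_all marks every item in it
        -- as expired, so stale pre-reload state is discarded (along with
        -- the accumulated statistics, as noted above)
        local dict = ngx.shared.healthcheck
        if dict then
            dict:flush_all()
        end
    }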
Alternatively, you can modify the peer_ok and peer_fail functions so that peer.down is read globally, i.e. fetched from the lua_shared_dict;
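A minimal sketch of that second suggestion (is_peer_down_globally is a hypothetical helper name, not part of the library; it reuses the gen_peer_key scheme and assumes the "d:" prefix for the down flag):

    -- sketch: read the peer's down state from the shared dict rather
    -- than the per-worker peer.down field, so workers created by a
    -- reload still see the state recorded before the reload
    local function is_peer_down_globally(ctx, is_backup, id)
        local key = gen_peer_key("d:", ctx.upstream, is_backup, id)
        local down, err = ctx.dict:get(key)
        if err then
            errlog("failed to get peer down key: ", err)
        end
        return down
    end

In peer_ok, the final check would then become "if is_peer_down_globally(ctx, is_backup, id) and succ >= ctx.rise then", and peer_fail would test the same helper before deciding whether to mark the peer down.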
thanks.