-- SPDX-License-Identifier: GPL-3.0-or-later
local ffi = require('ffi')
log_info(ffi.C.LOG_GRP_TESTS, 'my PID = %d', worker.pid)
trust_anchors.remove('.')
cache.size = 2*MB
net = { '{{SELF_ADDR}}' }
{% if QMIN == "false" %}
option('NO_MINIMIZE', true)
{% else %}
option('NO_MINIMIZE', false)
{% endif %}
-- Self-checks on globals
assert(help() ~= nil)
assert(worker.id ~= nil)
-- Self-checks on facilities
assert(cache.count() == 0)
assert(cache.stats() ~= nil)
assert(cache.backends() ~= nil)
assert(worker.stats() ~= nil)
assert(net.interfaces() ~= nil)
-- Self-checks on loaded stuff
assert(#modules.list() > 0)
-- Self-check timers
ev = event.recurrent(1 * sec, function (ev) return 1 end)
event.cancel(ev)
local kluautil = require('kluautil')
local tap = require('tapered')
local checks_total = 16
local n_instances = 3 -- must match deckard.yaml
worker.control_path = worker.cwd .. '/../kresd3/control/'
net.listen(worker.control_path .. worker.pid, nil, {kind = 'control'})
assert(#net.list() >= 3) -- UDP, TCP, control
-- debug, kept for future use
--log_level("debug")
log_debug(ffi.C.LOG_GRP_TESTS, '%s', worker.control_path)
log_debug(ffi.C.LOG_GRP_TESTS, '%s', table_print(net.list()))
function wait_for_sockets()
log_info(ffi.C.LOG_GRP_TESTS, 'waiting for control sockets')
local timeout = 5000 -- ms
local start_time = tonumber(ffi.C.kr_now())
local now
while true do
now = tonumber(ffi.C.kr_now())
if now > start_time + timeout then
log_info(ffi.C.LOG_GRP_TESTS, 'timeout while waiting for control sockets to appear')
os.exit(3)
end
local pids = kluautil.list_dir(worker.control_path)
if #pids == n_instances then
-- debug, kept for future use
log_debug(ffi.C.LOG_GRP_TESTS, 'got control sockets:')
log_debug(ffi.C.LOG_GRP_TESTS, table_print(pids))
break
else
worker.sleep(0.1)
end
end
log_info(ffi.C.LOG_GRP_TESTS, 'PIDs are visible now (waiting took %d ms)', now - start_time)
end
-- expression should throw Lua error:
-- wrap it in a function which runs the expression on leader and follower
-- separately so we can guarantee both cases are covered
function boom_follower_and_leader(boom_expr, desc)
local variants = {leader = '~=', follower = '=='}
for name, operator in pairs(variants) do
-- beware, newline is not allowed in expr
local full_expr = string.format(
'if (worker.pid %s %s) then return true '
.. 'else return %s end',
operator, worker.pid, boom_expr)
local full_desc = name .. ': '
if desc then
full_desc = full_desc .. desc .. ' (' .. boom_expr .. ')'
else
full_desc = full_desc .. boom_expr
end
tap.boom(map, {full_expr}, full_desc)
end
end
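-- For illustration, assuming worker.pid == 1234 and boom_expr == 'error("explosion")',
-- the 'leader' variant above generates:
--   if (worker.pid ~= 1234) then return true else return error("explosion") end
-- so the expression blows up only on the leader (the instance running this test),
-- while the 'follower' variant (==) blows up only on the other instances.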
function tests()
-- add delay to each test to force scheduler to interleave tests and DNS queries
local test_delay = 20 / 1000 -- seconds
log_info(ffi.C.LOG_GRP_TESTS, 'starting map() tests now')
tap.boom(map, {'1 ++ 1'}, 'syntax error in command is detected')
worker.sleep(test_delay)
-- array of integers
local pids = map('worker.pid')
tap.same(pids.n, n_instances, 'all pids were obtained')
table.sort(pids)
worker.sleep(test_delay)
-- expression produces array of integers
local pids_plus_one = map('worker.pid + 1')
tap.same(pids_plus_one.n, n_instances, 'all pids were obtained')
table.sort(pids_plus_one)
for idx=1,n_instances do
tap.same(pids[idx] + 1, pids_plus_one[idx],
'increment expression worked')
end
worker.sleep(test_delay)
-- error detection
boom_follower_and_leader('error("explosion")')
worker.sleep(test_delay)
-- unsupported number of return values
boom_follower_and_leader('1, 2')
worker.sleep(test_delay)
boom_follower_and_leader('unpack({})')
worker.sleep(test_delay)
-- unsupported return type
boom_follower_and_leader(
'function() print("this cannot be serialized") end')
worker.sleep(test_delay)
tap.same({n = n_instances}, map('nil'),
'nil values are counted as returned')
worker.sleep(test_delay)
local exp = {n = n_instances}
for i=1,n_instances do
table.insert(exp, {nil, 2, nil, n=3})
end
local got = map('require("kluautil").kr_table_pack(nil, 2, nil)')
tap.same(got, exp, 'kr_table_pack handles nil values')
worker.sleep(test_delay)
end
local started = false
function tests_start()
-- just in case, duplicates should not happen
if started then
log_info(ffi.C.LOG_GRP_TESTS, 'huh? duplicate test invocation ignored, a retransmit?')
return
end
started = true
log_info(ffi.C.LOG_GRP_TESTS, 'start query triggered, scheduling tests')
-- DNS queries and map() commands must be serviced while sleep is running
worker.coroutine(function() worker.sleep(3600) end)
worker.coroutine(tests)
end
-- Deckard query will trigger tests
policy.add(policy.suffix(tests_start, {'\5start\0'}))
function tests_done()
print('final query triggered')
event.after(0, function()
tap.done(checks_total)
end)
end
-- A Deckard query will execute tap.done(), which will call os.exit(),
-- i.e. this callback has to be called only after the answer to Deckard was sent
policy.add(policy.suffix(tests_done, {'\4done\0'}), true)
-- add delay to each query to force scheduler to interleave tests and DNS queries
policy.add(policy.all(
function()
local delay = 10 -- ms
log_info(ffi.C.LOG_GRP_TESTS, 'packet delayed by %d ms', delay)
worker.sleep(delay / 1000)
end))
wait_for_sockets()
{% if DAEMON_NAME == "kresd1" %}
-- forward to Deckard test server
policy.add(policy.all(policy.FORWARD('192.0.2.1')))
{% else %}
-- forward to next kresd instance in chain
{# find out IP address of kresd instance with lower number,
i.e. kresd2 forwards to kresd1 #}
policy.add(policy.all(policy.FORWARD('{{ PROGRAMS[ "kresd" ~ (DAEMON_NAME[-1]|int() - 1)]["address"] }}')))
{% endif %}
; does not make any practical difference, so we limit ourselves to a single test run
query-minimization: off
CONFIG_END
SCENARIO_BEGIN Empty answers to any query - forwarding without validation
; forwarding target
RANGE_BEGIN 1 1000000
ADDRESS 192.0.2.1
; NODATA to everything
ENTRY_BEGIN
MATCH opcode
ADJUST copy_id copy_query
REPLY NOERROR QR
SECTION QUESTION
. IN SOA
SECTION ANSWER
. 86400 IN SOA rootns. you.test. 2017071100 1800 900 604800 86400
ENTRY_END
RANGE_END
STEP 10 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
start. IN TXT
ENTRY_END
STEP 11 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
start. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1001 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1001. IN TXT
ENTRY_END
STEP 1002 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1001. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1003 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1003. IN TXT
ENTRY_END
STEP 1004 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1003. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1005 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1005. IN TXT
ENTRY_END
STEP 1006 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1005. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1007 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1007. IN TXT
ENTRY_END
STEP 1008 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1007. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1009 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1009. IN TXT
ENTRY_END
STEP 1010 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1009. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1011 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1011. IN TXT
ENTRY_END
STEP 1012 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1011. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1013 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1013. IN TXT
ENTRY_END
STEP 1014 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1013. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1015 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1015. IN TXT
ENTRY_END
STEP 1016 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1015. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1017 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1017. IN TXT
ENTRY_END
STEP 1018 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1017. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1019 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1019. IN TXT
ENTRY_END
STEP 1020 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1019. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1021 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1021. IN TXT
ENTRY_END
STEP 1022 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1021. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1023 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1023. IN TXT
ENTRY_END
STEP 1024 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1023. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1025 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1025. IN TXT
ENTRY_END
STEP 1026 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1025. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1027 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1027. IN TXT
ENTRY_END
STEP 1028 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1027. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1029 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1029. IN TXT
ENTRY_END
STEP 1030 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1029. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1031 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
test1031. IN TXT
ENTRY_END
STEP 1032 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
test1031. IN TXT
SECTION ANSWER
ENTRY_END
STEP 1033 QUERY
ENTRY_BEGIN
REPLY RD
SECTION QUESTION
done. IN TXT
ENTRY_END
STEP 1034 CHECK_ANSWER
ENTRY_BEGIN
REPLY NOERROR QR RD RA
MATCH opcode rcode flags question answer
SECTION QUESTION
done. IN TXT
SECTION ANSWER
ENTRY_END
SCENARIO_END
# daemon: lua modules
# SPDX-License-Identifier: GPL-3.0-or-later
config_tests += [
['controlsock', files('controlsock.test.lua')],
['krprint', files('krprint.test.lua')],
['log', files('log.test.lua')],
['ta', files('trust_anchors.test/ta.test.lua')],
['ta_bootstrap', files('trust_anchors.test/bootstrap.test.lua'), ['y2k38']],
]
integr_tests += [
['map', meson.current_source_dir() / 'map.test.integr'],
]
lua_config = configuration_data()
lua_config.set('keyfile_default', keyfile_default)
lua_config.set('etc_dir', etc_dir)
lua_config.set('run_dir', run_dir)
lua_config.set('systemd_cache_dir', systemd_cache_dir)
lua_config.set('unmanaged', managed_ta ? 'false' : 'true')
trust_anchors = configure_file(
input: 'trust_anchors.lua.in',
output: 'trust_anchors.lua',
configuration: lua_config,
)
sandbox = configure_file(
input: 'sandbox.lua.in',
output: 'sandbox.lua',
configuration: lua_config,
)
distro_preconfig = configure_file(
input: 'distro-preconfig.lua.in',
output: 'distro-preconfig.lua',
configuration: lua_config,
)
# Unfortunately, a different libknot ABI implies different contents of 'kres-gen.lua'.
if libknot.version().version_compare('>= 3.3')
kres_gen_fname = 'kres-gen-33.lua'
endif
# Exact types around time_t aren't easy to detect, but at least we need the same size.
time_t_size = meson.get_compiler('c').sizeof('time_t', prefix: '#include <sys/time.h>')
kres_gen_config = {}
foreach t: [ 'long', 'long long' ]
if meson.get_compiler('c').sizeof(t) == time_t_size
kres_gen_config = { 'time_t': t }
break
endif
endforeach
if kres_gen_config == {}
error('Unexpected sizeof(time_t) == @0@'.format(time_t_size))
endif
kres_gen_lua = configure_file(
input: kres_gen_fname,
output: 'kres-gen.lua',
configuration: kres_gen_config,
)
run_target( # run manually to re-generate kres-gen.lua
'kres-gen',
command: [ find_program('./kres-gen.sh'), kres_gen_fname ],
)
# A simple config test: check that sizes of some structures match
# in C and pre-generated lua bindings.
# The point is that regeneration is quite expensive in time and dependencies,
# but this basic sanity check can always be run, except for cross compilation,
# as we *run* luajit to find out the real sizes.
if get_option('kres_gen_test') and not meson.is_cross_build()
types_to_check = [
{ 'tname': 'time_t', 'incl': '#include <sys/time.h>' },
{ 'tname': 'struct timeval', 'incl' : '#include <sys/time.h>' },
{ 'tname': 'zs_scanner_t', 'incl': '#include <libzscanner/scanner.h>', 'dep': libzscanner },
{ 'tname': 'knot_pkt_t', 'incl' : '#include <libknot/packet/pkt.h>', 'dep': libknot },
]
# Construct the lua tester as a meson string.
if meson.version().version_compare('>=1.4')
kres_gen_lua_path = kres_gen_lua.full_path()
else
kres_gen_lua_path = '@0@/../../@1@'.format(meson.current_build_dir(), kres_gen_lua)
endif
kres_gen_test_luastr = '''
dofile('@0@')
local ffi = require('ffi')
'''.format(kres_gen_lua_path)
foreach ttc: types_to_check
# We're careful to add just includes and compile args; anything more (e.g. linking flags) is fragile.
if 'dep' in ttc
dep = ttc.get('dep').partial_dependency(includes: true, compile_args: true)
else
dep = []
endif
tsize = meson.get_compiler('c').sizeof(ttc.get('tname'), prefix: ttc.get('incl'),
dependencies: dep)
kres_gen_test_luastr += '''
assert(ffi.sizeof(ffi.typeof('@0@')) == @1@,
'Lua binding for C type ' .. '@0@' .. ' has incorrect size: '
.. ffi.sizeof(ffi.typeof('@0@'))
)
'''.format(ttc.get('tname'), tsize)
endforeach
# Now feed it directly into luajit.
kres_gen_test = run_command(find_program('luajit'), '-e', kres_gen_test_luastr, check: false)
if kres_gen_test.returncode() != 0
error('if you use released Knot* versions, please contact us: https://www.knot-resolver.cz/contact/\n'
+ kres_gen_test.stderr().strip())
endif
endif
lua_src = [
files('postconfig.lua'),
files('kres.lua'),
kres_gen_lua,
sandbox,
trust_anchors,
files('zonefile.lua'),
files('kluautil.lua'),
files('krprint.lua'),
distro_preconfig,
]
# install daemon lua sources
install_data(
lua_src,
install_dir: lib_dir,
)
-- SPDX-License-Identifier: GPL-3.0-or-later
local ffi = require('ffi')
local C = ffi.C
local function count_sockets()
local dns_socks = 0
local control_socks = 0
for _, socket in ipairs(net.list()) do
if socket.kind == 'control' then
control_socks = control_socks + 1
elseif (socket.kind == 'dns' or
socket.kind == 'xdp' or
socket.kind == 'tls' or
socket.kind == 'doh_legacy' or
socket.kind == 'doh2') then
dns_socks = dns_socks + 1
end
end
return dns_socks, control_socks
end
local n_dns_socks, n_control_socks = count_sockets()
-- Check and set control sockets path
worker.control_path = worker.control_path or (worker.cwd .. '/control/')
-- Bind to control socket by default
if n_control_socks == 0 and not env.KRESD_NO_LISTEN then
local path = worker.control_path..worker.pid
local ok, err = pcall(net.listen, path, nil, { kind = 'control' })
if not ok then
log_warn(C.LOG_GRP_NETWORK, 'bind to '..path..' failed '..err)
end
end
-- Listen on localhost
if n_dns_socks == 0 and not env.KRESD_NO_LISTEN then
local ok, err = pcall(net.listen, '127.0.0.1')
if not ok then
error('bind to 127.0.0.1@53 '..err)
end
-- Binding to other ifaces may fail
ok, err = pcall(net.listen, '127.0.0.1', 853)
if not ok then
log_info(ffi.C.LOG_GRP_NETWORK, 'bind to 127.0.0.1@853 '..err)
end
ok, err = pcall(net.listen, '::1')
if not ok then
log_info(ffi.C.LOG_GRP_NETWORK, 'bind to ::1@53 '..err)
end
ok, err = pcall(net.listen, '::1', 853)
if not ok then
log_info(ffi.C.LOG_GRP_NETWORK, 'bind to ::1@853 '..err)
end
-- Exit when kresd isn't listening on any interfaces
n_dns_socks, _ = count_sockets()
if n_dns_socks == 0 then
panic('not listening on any interface, exiting...')
end
end
-- Open cache if not set/disabled
if not cache.current_size then
cache.size = 100 * MB
end
-- If no addresses for root servers are set, load them from the default file
if C.kr_zonecut_is_empty(kres.context().root_hints) then
_hint_root_file()
end
-- Units
kB = 1024
MB = 1024*kB
GB = 1024*MB
-- Time
sec = 1000
second = sec
minute = 60 * sec
min = minute
hour = 60 * minute
day = 24 * hour
-- Logging
function panic(fmt, ...)
error(string.format('error: '..fmt, ...))
end
function warn(fmt, ...)
io.stderr:write(string.format(fmt..'\n', ...))
end
function log(fmt, ...)
print(string.format(fmt, ...))
end
-- Resolver bindings
kres = require('kres')
trust_anchors = require('trust_anchors')
resolve = worker.resolve
if rawget(kres, 'str2dname') ~= nil then
todname = kres.str2dname
end
-- Shorthand for aggregated per-worker information
worker.info = function ()
local t = worker.stats()
t.pid = worker.pid
return t
end
-- Resolver mode of operation
local current_mode = 'normal'
local mode_table = { normal=0, strict=1, permissive=2 }
function mode(m)
if not m then return current_mode end
if not mode_table[m] then error('unsupported mode: '..m) end
-- Update current operation mode
current_mode = m
option('STRICT', current_mode == 'strict')
option('PERMISSIVE', current_mode == 'permissive')
return true
end
-- Function aliases
-- `env.VAR` returns os.getenv(VAR)
env = {}
setmetatable(env, {
__index = function (t, k) return os.getenv(k) end
})
-- Quick access to interfaces
-- `net.<iface>` => `net.interfaces()[iface]`
-- `net = {addr1, ..}` => `net.listen(name, addr1)`
-- `net.ipv{4,6} = {true, false}` => enable/disable IPv{4,6}
setmetatable(net, {
__index = function (t, k)
local v = rawget(t, k)
if v then return v
elseif k == 'ipv6' then return not option('NO_IPV6')
elseif k == 'ipv4' then return not option('NO_IPV4')
else return net.interfaces()[k]
end
end,
__newindex = function (t,k,v)
if k == 'ipv6' then return option('NO_IPV6', not v)
elseif k == 'ipv4' then return option('NO_IPV4', not v)
else
local iname = rawget(net.interfaces(), v)
if iname then t.listen(iname)
else t.listen(v)
end
end
end
})
-- Syntactic sugar for module loading
-- `modules.<name> = <config>`
setmetatable(modules, {
__newindex = function (t,k,v)
if type(k) == 'number' then k = v v = nil end
if not rawget(_G, k) then
modules.load(k)
k = string.match(k, '%w+')
local mod = _G[k]
local config = mod and rawget(mod, 'config')
if mod ~= nil and config ~= nil then
if k ~= v then config(v)
else config()
end
end
end
end
})
-- Syntactic sugar for cache
-- `#cache -> cache.count()`
-- `cache[x] -> cache.get(x)`
-- `cache.{size|storage} = value`
setmetatable(cache, {
__len = function (t)
return t.count()
end,
__index = function (t, k)
return rawget(t, k) or (rawget(t, 'current_size') and t.get(k))
end,
__newindex = function (t,k,v)
-- Defaults
local storage = rawget(t, 'current_storage')
if not storage then storage = 'lmdb://' end
local size = rawget(t, 'current_size')
if not size then size = 10*MB end
-- Declarative interface for cache
if k == 'size' then t.open(v, storage)
elseif k == 'storage' then t.open(size, v)
else rawset(t, k, v) end
end
})
-- Syntactic sugar for TA store
setmetatable(trust_anchors, {
__newindex = function (t,k,v)
if k == 'file' then t.config(v)
elseif k == 'negative' then t.set_insecure(v)
else rawset(t, k, v) end
end,
})
-- Register module in Lua environment
function modules_register(module)
-- Syntactic sugar for get() and set() properties
setmetatable(module, {
__index = function (t, k)
local v = rawget(t, k)
if v then return v
elseif rawget(t, 'get') then return t.get(k)
end
end,
__newindex = function (t, k, v)
local old_v = rawget(t, k)
if not old_v and rawget(t, 'set') then
t.set(k..' '..v)
end
end
})
end
-- Make sandboxed environment
local function make_sandbox(defined)
local __protected = { modules = true, cache = true, net = true, trust_anchors = true }
return setmetatable({}, {
__index = defined,
__newindex = function (t, k, v)
if __protected[k] then
for k2,v2 in pairs(v) do
defined[k][k2] = v2
end
else
defined[k] = v
end
end
})
end
-- Compatibility sandbox
if setfenv then -- Lua 5.1 and less
_G = make_sandbox(getfenv(0))
setfenv(0, _G)
else -- Lua 5.2+
_SANDBOX = make_sandbox(_ENV)
end
-- Interactive command evaluation
function eval_cmd(line, raw)
-- Compatibility sandbox code loading
local function load_code(code)
if getfenv then -- Lua 5.1
return loadstring(code)
else -- Lua 5.2+
return load(code, nil, 't', _ENV)
end
end
local status, err, chunk
chunk, err = load_code(raw and 'return '..line or 'return table_print('..line..')')
if err then
chunk, err = load_code(line)
end
if not err then
return chunk()
else
error(err)
end
end
-- Pretty printing
function table_print (tt, indent, done)
done = done or {}
indent = indent or 0
result = ""
-- Convert to printable string (escape unprintable)
local function printable(value)
value = tostring(value)
local bytes = {}
for i = 1, #value do
local c = string.byte(value, i)
if c >= 0x20 and c < 0x7f then table.insert(bytes, string.char(c))
else table.insert(bytes, '\\'..tostring(c))
end
if i > 50 then table.insert(bytes, '...') break end
end
return table.concat(bytes)
end
if type(tt) == "table" then
for key, value in pairs (tt) do
result = result .. string.rep (" ", indent)
if type (value) == "table" and not done [value] then
done [value] = true
result = result .. string.format("[%s] => {\n", printable (key))
result = result .. table_print (value, indent + 4, done)
result = result .. string.rep (" ", indent)
result = result .. "}\n"
else
result = result .. string.format("[%s] => %s\n",
tostring (key), printable(value))
end
end
else
result = result .. tostring(tt) .. "\n"
end
return result
end
-- SPDX-License-Identifier: GPL-3.0-or-later
local debug = require('debug')
local ffi = require('ffi')
local kluautil = require('kluautil')
local krprint = require("krprint")
-- Units
kB = 1024
MB = 1024*kB
GB = 1024*MB
-- Time
sec = 1000
second = sec
minute = 60 * sec
min = minute
hour = 60 * minute
day = 24 * hour
-- Logging
-- from syslog.h
LOG_CRIT = 2
LOG_ERR = 3
LOG_WARNING = 4
LOG_NOTICE = 5
LOG_INFO = 6
LOG_DEBUG = 7
local function curr_file() return debug.getinfo(4,'S').source end
local function curr_line() return debug.getinfo(4,'l').currentline end
local function log_fmt(grp, level, fmt, ...)
ffi.C.kr_log_fmt(grp, level,
'CODE_FILE='..curr_file(), 'CODE_LINE='..curr_line(), 'CODE_FUNC=',
'[%-6s] %s\n', ffi.C.kr_log_grp2name(grp), string.format(fmt, ...))
end
function log_req(req, qry_uid, indent, grp, fmt, ...)
ffi.C.kr_log_req1(req, qry_uid, indent, grp, ffi.C.kr_log_grp2name(grp),
'%s\n', string.format(fmt, ...))
end
function log_qry(qry, grp, fmt, ...)
ffi.C.kr_log_q1(qry, grp, ffi.C.kr_log_grp2name(grp),
'%s\n', string.format(fmt, ...))
end
function panic(fmt, ...)
print(debug.traceback('error occurred here (config filename:lineno is '
.. 'at the bottom, if config is involved):', 2))
error(string.format('ERROR: '.. fmt, ...), 0)
end
function log_error(grp, fmt, ...)
log_fmt(grp, LOG_ERR, fmt, ...)
end
function log_warn(grp, fmt, ...)
log_fmt(grp, LOG_WARNING, fmt, ...)
end
function log_notice(grp, fmt, ...)
log_fmt(grp, LOG_NOTICE, fmt, ...)
end
function log_info(grp, fmt, ...)
log_fmt(grp, LOG_INFO, fmt, ...)
end
function log_debug(grp, fmt, ...)
log_fmt(grp, LOG_DEBUG, fmt, ...)
end
function log(fmt, ...)
log_notice(ffi.C.LOG_GRP_MODULE, fmt, ...)
end
-- Resolver bindings
kres = require('kres')
if rawget(kres, 'str2dname') ~= nil then
todname = kres.str2dname
end
worker.resolve_pkt = function (pkt, options, finish, init)
options = kres.mk_qflags(options)
local task = ffi.C.worker_resolve_start(pkt, options)
-- Deal with finish and init callbacks
if finish ~= nil then
local finish_cb
finish_cb = ffi.cast('trace_callback_f',
function (req)
jit.off(true, true) -- JIT for (C -> lua)^2 nesting isn't allowed
finish(req.answer, req)
finish_cb:free()
end)
task.ctx.req.trace_finish = finish_cb
end
if init ~= nil then
init(task.ctx.req)
end
return ffi.C.worker_resolve_exec(task, pkt) == 0
end
worker.resolve = function (qname, qtype, qclass, options, finish, init)
-- Alternatively use named arguments
if type(qname) == 'table' then
local t = qname
qname = t.name
qtype = t.type
qclass = t.class
options = t.options
finish = t.finish
init = t.init
end
qtype = qtype or kres.type.A
qclass = qclass or kres.class.IN
options = kres.mk_qflags(options)
-- LATER: nicer errors for rubbish in qname, qtype, qclass?
local pkt = ffi.C.worker_resolve_mk_pkt(qname, qtype, qclass, options)
if pkt == nil then
panic('failure in worker.resolve(); probably invalid qname "%s"', qname)
end
local ret = worker.resolve_pkt(pkt, options, finish, init)
ffi.C.knot_pkt_free(pkt);
return ret
end
resolve = worker.resolve
-- Shorthand for aggregated per-worker information
worker.info = function ()
local t = worker.stats()
t.pid = worker.pid
return t
end
-- Resolver mode of operation
local current_mode = 'normal'
local mode_table = { normal=0, strict=1, permissive=2 }
function mode(m)
if not m then return current_mode end
if not mode_table[m] then error('unsupported mode: '..m) end
-- Update current operation mode
current_mode = m
option('STRICT', current_mode == 'strict')
option('PERMISSIVE', current_mode == 'permissive')
return true
end
-- Trivial option alias
function reorder_RR(val)
return option('REORDER_RR', val)
end
-- Get/set resolver options via name (string)
function option(name, val)
local flags = kres.context().options;
-- Note: there is no way to test the existence of flags[name], but we want an error anyway.
name = string.upper(name) -- convenience
if val ~= nil then
if (val ~= true) and (val ~= false) then
panic('invalid option value: ' .. tostring(val))
end
flags[name] = val;
end
return flags[name];
end
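-- Illustrative calls (a sketch, not executed here):
--   option('NO_MINIMIZE', true)  -- set the flag
--   option('NO_MINIMIZE')        -- read the current value back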
-- Function aliases
-- `env.VAR` returns os.getenv(VAR)
env = {}
setmetatable(env, {
__index = function (_, k) return os.getenv(k) end
})
debugging = {}
setmetatable(debugging, {
__index = function(_, k)
if k == 'assertion_abort' then return ffi.C.kr_dbg_assertion_abort
elseif k == 'assertion_fork' then return ffi.C.kr_dbg_assertion_fork
else panic('invalid debugging option: ' .. tostring(k))
end
end,
__newindex = function(_, k, v)
if k == 'assertion_abort' then ffi.C.kr_dbg_assertion_abort = v
elseif k == 'assertion_fork' then ffi.C.kr_dbg_assertion_fork = v
else panic('invalid debugging option: ' .. tostring(k))
end
end
})
-- Quick access to interfaces
-- `net.<iface>` => `net.interfaces()[iface]`
-- `net = {addr1, ..}` => `net.listen(name, addr1)`
-- `net.ipv{4,6} = {true, false}` => enable/disable IPv{4,6}
setmetatable(net, {
__index = function (t, k)
local v = rawget(t, k)
if v then return v
elseif k == 'ipv6' then return not option('NO_IPV6')
elseif k == 'ipv4' then return not option('NO_IPV4')
else return net.interfaces()[k]
end
end,
__newindex = function (t,k,v)
if k == 'ipv6' then return option('NO_IPV6', not v)
elseif k == 'ipv4' then return option('NO_IPV4', not v)
else
local iname = rawget(net.interfaces(), v)
if iname then t.listen(iname)
else t.listen(v)
end
end
end
})
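-- Illustrative uses of the sugar above (the interface name is an example; not executed here):
--   net = { '127.0.0.1' }  -- net.listen('127.0.0.1')
--   net.ipv6 = false       -- option('NO_IPV6', true)
--   net.eth0               -- net.interfaces()['eth0']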
-- Syntactic sugar for module loading
-- `modules.<name> = <config>`
setmetatable(modules, {
__newindex = function (_, k, v)
if type(k) == 'number' then
k, v = v, nil
end
if not rawget(_G, k) then
modules.load(k)
k = string.match(k, '[%w_]+')
local mod = _G[k]
local config = mod and rawget(mod, 'config')
if mod ~= nil and config ~= nil then
if k ~= v then config(v)
else config()
end
end
end
end
})
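-- Illustrative use (module name and option are examples only; not executed here):
--   modules.predict = { period = 24 }  -- modules.load('predict'), then predict.config({ period = 24 })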
-- Set up lua table for a C module. (Internal function.)
function modules_create_table_for_c(kr_module_ud)
local kr_module = ffi.cast('struct kr_module **', kr_module_ud)[0]
--- Set up the global table named according to the module.
if kr_module.config == nil and kr_module.props == nil then
return
end
local module = {}
local module_name = ffi.string(kr_module.name)
_G[module_name] = module
--- Construct lua functions for properties.
if kr_module.props ~= nil then
local i = 0
while true do
local prop = kr_module.props[i]
local cb = prop.cb
if cb == nil then break; end
module[ffi.string(prop.name)] =
function (arg) -- lua wrapper around kr_prop_cb function typedef
local arg_conv
if type(arg) == 'table' or type(arg) == 'boolean' then
arg_conv = tojson(arg)
elseif arg ~= nil then
arg_conv = tostring(arg)
end
local ret_cstr = cb(ffi.C.the_engine, kr_module, arg_conv)
if ret_cstr == nil then
return nil
end
-- LATER(optim.): superfluous copying
local ret_str = ffi.string(ret_cstr)
-- This is a bit ugly, but the API is that invalid JSON
-- should be just returned as string :-(
local status, ret = pcall(fromjson, ret_str)
if not status then ret = ret_str end
ffi.C.free(ret_cstr)
return ret
end
i = i + 1
end
end
--- Construct lua function for config().
if kr_module.config ~= nil then
module.config =
function (arg)
local arg_conv
if type(arg) == 'table' or type(arg) == 'boolean' then
arg_conv = tojson(arg)
elseif arg ~= nil then
arg_conv = tostring(arg)
end
return kr_module.config(kr_module, arg_conv)
end
end
--- Add syntactic sugar for get() and set() properties.
--- That also "catches" any commands like `moduleName.foo = bar`.
local m_index, m_newindex
local get_f = rawget(module, 'get')
if get_f ~= nil then
m_index = function (_, key)
return get_f(key)
end
else
m_index = function ()
error('module ' .. module_name .. ' does not support indexing syntax sugar')
end
end
local set_f = rawget(module, 'set')
if set_f ~= nil then
m_newindex = function (_, key, value)
-- This will produce a nasty error on some non-string parameters.
-- Still, we already use it with integer values, e.g. in predict module :-/
return set_f(key .. ' ' .. value)
end
else
m_newindex = function ()
error('module ' .. module_name .. ' does not support assignment syntax sugar')
end
end
setmetatable(module, {
-- note: the two functions only get called for *missing* indices
__index = m_index,
__newindex = m_newindex,
})
end
local layer_ctx = ffi.C.kr_layer_t_static
-- Utilities internal for lua layer glue; see ../ffimodule.c
modules_ffi_layer_wrap1 = function (layer_cb)
return layer_cb(layer_ctx.state, layer_ctx.req)
end
modules_ffi_layer_wrap2 = function (layer_cb)
return layer_cb(layer_ctx.state, layer_ctx.req, layer_ctx.pkt)
end
modules_ffi_layer_wrap_checkout = function (layer_cb)
return layer_cb(layer_ctx.state, layer_ctx.req, layer_ctx.pkt,
layer_ctx.dst, layer_ctx.is_stream)
end
modules_ffi_wrap_modcb = function (cb, kr_module_ud) -- this one isn't for layer
local kr_module = ffi.cast('struct kr_module **', kr_module_ud)[0]
return cb(kr_module)
end
-- Return filesystem size where the cache resides.
cache.fssize = function ()
local path = cache.current_storage or '.'
-- As it is now, `path` may or may not include the lmdb:// prefix.
if string.sub(path, 1, 7) == 'lmdb://' then
path = string.sub(path, 8)
end
if #path == 0 then
path = '.'
end
local size = tonumber(ffi.C.kr_fssize(path))
if size < 0 then
panic('cache.fssize(): %s', ffi.string(ffi.C.knot_strerror(size)))
else
return size
end
end
cache.clear = function (name, exact_name, rr_type, chunk_size, callback, prev_state)
if name == nil or (name == '.' and not exact_name) then
-- keep same output format as for 'standard' clear
local total_count = cache.count()
if not cache.clear_everything() then
error('unable to clear everything')
end
return {count = total_count}
end
-- Check parameters, in order, and set defaults if missing.
local dname = kres.str2dname(name)
if not dname then error('cache.clear(): incorrect name passed') end
if exact_name == nil then exact_name = false end
if type(exact_name) ~= 'boolean'
then error('cache.clear(): incorrect exact_name passed') end
local cach = kres.context().cache;
local rettable = {}
-- Apex warning. If the caller passes a custom callback,
-- we assume they are advanced enough not to need the check.
-- The point is to avoid repeating the check in each callback iteration.
if callback == nil then
local apex_array = ffi.new('knot_dname_t *[1]') -- C: dname **apex_array
local ret = ffi.C.kr_cache_closest_apex(cach, dname, false, apex_array)
if ret < 0 then
error(ffi.string(ffi.C.knot_strerror(ret))) end
if not ffi.C.knot_dname_is_equal(apex_array[0], dname) then
local apex_str = kres.dname2str(apex_array[0])
rettable.not_apex = 'to clear proofs of non-existence call '
.. 'cache.clear(\'' .. tostring(apex_str) ..'\')'
rettable.subtree = apex_str
end
ffi.C.free(apex_array[0])
end
if rr_type ~= nil then
-- Special case, without any subtree searching.
if not exact_name
then error('cache.clear(): specifying rr_type only supported with exact_name') end
if chunk_size or callback
then error('cache.clear(): chunk_size and callback parameters not supported with rr_type') end
local ret = ffi.C.kr_cache_remove(cach, dname, rr_type)
if ret < 0 then error(ffi.string(ffi.C.knot_strerror(ret))) end
return {count = 1}
end
if chunk_size == nil then chunk_size = 100 end
if type(chunk_size) ~= 'number' or chunk_size <= 0
then error('cache.clear(): chunk_size has to be a positive integer') end
-- Do the C call, and add chunk_size warning.
rettable.count = ffi.C.kr_cache_remove_subtree(cach, dname, exact_name, chunk_size)
if rettable.count == chunk_size then
local msg_extra = ''
if callback == nil then
msg_extra = '; the default callback will continue asynchronously'
end
rettable.chunk_limit = 'chunk size limit reached' .. msg_extra
end
-- Default callback function: repeat after 1ms
if callback == nil then callback =
function (cbname, cbexact_name, cbrr_type, cbchunk_size, cbself, cbprev_state, cbrettable)
if cbrettable.count < 0 then error(ffi.string(ffi.C.knot_strerror(cbrettable.count))) end
if cbprev_state == nil then cbprev_state = { round = 0 } end
if type(cbprev_state) ~= 'table'
then error('cache.clear() callback: incorrect prev_state passed') end
cbrettable.round = cbprev_state.round + 1
if (cbrettable.count == cbchunk_size) then
event.after(1, function ()
cache.clear(cbname, cbexact_name, cbrr_type, cbchunk_size, cbself, cbrettable)
end)
elseif cbrettable.round > 1 then
log_info(ffi.C.LOG_GRP_CACHE, 'asynchronous cache.clear(\'' .. cbname .. '\', '
.. tostring(cbexact_name) .. ') finished')
end
return cbrettable
end
end
return callback(name, exact_name, rr_type, chunk_size, callback, prev_state, rettable)
end
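-- Illustrative calls (the names are examples only; not executed here):
--   cache.clear()                                   -- drop the whole cache
--   cache.clear('example.com.')                     -- clear the subtree, continuing asynchronously in chunks
--   cache.clear('example.com.', true, kres.type.A)  -- clear only A records at that exact name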
-- Syntactic sugar for cache
-- `cache[x] -> cache.get(x)`
-- `cache.{size|storage} = value`
setmetatable(cache, {
__index = function (t, k)
local res = rawget(t, k)
if not res and not rawget(t, 'current_size') then return res end
-- Beware: t.get returns empty table on failure to find.
-- That would be confusing here (breaking kresc), so return nil instead.
res = t.get(k)
if res and next(res) ~= nil then return res else return nil end
end,
__newindex = function (t,k,v)
-- Defaults
local storage = rawget(t, 'current_storage')
if not storage then storage = 'lmdb://' end
local size = rawget(t, 'current_size')
if not size then size = 10*MB end
-- Declarative interface for cache
if k == 'size' then t.open(v, storage)
elseif k == 'storage' then t.open(size, v) end
end
})
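-- Illustrative uses of the sugar above (the storage path is an example; not executed here):
--   cache.size = 200 * MB                              -- cache.open(200 * MB, current or default storage)
--   cache.storage = 'lmdb:///var/cache/knot-resolver'  -- cache.open(current or default size, new storage)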
-- Make sandboxed environment
local function make_sandbox(defined)
local __protected = {
worker = true, env = true, debugging = true, modules = true,
cache = true, net = true, trust_anchors = true
}
-- Compute and export the list of top-level names (hidden otherwise)
local nl = ""
for n in pairs(defined) do
nl = nl .. n .. "\n"
end
return setmetatable({ __orig_name_list = nl }, {
__index = defined,
__newindex = function (_, k, v)
if __protected[k] then
for k2,v2 in pairs(v) do
defined[k][k2] = v2
end
else
defined[k] = v
end
end
})
end
-- Compatibility sandbox
_G = make_sandbox(getfenv(0))
setfenv(0, _G)
-- Load default modules
trust_anchors = require('trust_anchors')
modules.load('ta_update')
modules.load('ta_signal_query')
modules.load('policy')
modules.load('priming')
modules.load('detect_time_skew')
modules.load('detect_time_jump')
modules.load('ta_sentinel')
modules.load('edns_keepalive')
modules.load('refuse_nord')
modules.load('watchdog')
modules.load('extended_error')
-- Load keyfile_default
trust_anchors.add_file('@keyfile_default@', @unmanaged@)
local function eval_cmd_compile(line, mode)
-- Compatibility sandbox code loading
local function load_code(code)
if getfenv then -- Lua 5.1
return loadstring(code)
else -- Lua 5.2+
return load(code, nil, 't', _ENV)
end
end
-- See `ENGINE_EVAL_MODE_MAP(XX)` C-macro for possible values
local err, chunk
if mode == "LUA_TABLE" then
chunk, err = load_code('return table_print(('..line..'))')
elseif mode == "RAW" then
chunk, err = load_code('return ('..line..')')
elseif mode == "JSON" then
chunk, err = load_code('return tojson(('..line..'))')
else
return nil, "invalid mode"
end
if err then
chunk, err = load_code(line)
end
return chunk, err
end
-- Interactive command evaluation
function eval_cmd(line, mode)
local chunk, err = eval_cmd_compile(line, mode)
if not err then
return chunk()
else
error(err)
end
end
-- Pretty printing
local pprint = require('krprint').pprint
function table_print(...)
local strs = {}
local nargs = select('#', ...)
if nargs == 0 then
return nil
end
for n=1,nargs do
local arg = select(n, ...)
local arg_str = pprint(arg)
if nargs > 1 then
table.insert(strs, string.format("%s\t-- result # %d", arg_str, n))
else
table.insert(strs, arg_str)
end
end
return table.concat(strs, '\n')
end
-- This extends the worker module to allow asynchronous execution of functions and nonblocking I/O.
-- The current implementation combines cqueues for the Lua interface with event.socket(), so that the
-- resolver engine is not blocked while waiting for I/O or timers.
--
local has_cqueues, cqueues = pcall(require, 'cqueues')
if has_cqueues then
-- Export the asynchronous sleep function
worker.sleep = cqueues.sleep
-- Create metatable for workers to define the API
-- It can schedule multiple cqueues and yield execution when there's a wait for blocking I/O or timer
local asynchronous_worker_mt = {
work = function (self)
local ok, err, _, co = self.cq:step(0)
if not ok then
log_warn(ffi.C.LOG_GRP_SYSTEM, '%s error: %s %s', self.name or 'worker', err, debug.traceback(co))
end
-- Reschedule timeout or create new one
local timeout = self.cq:timeout()
if timeout then
-- Throttle timeouts to avoid too frequent wakeups
if timeout == 0 then timeout = 0.00001 end
-- Convert from seconds to duration
timeout = timeout * sec
if not self.next_timeout then
self.next_timeout = event.after(timeout, self.on_step)
else
event.reschedule(self.next_timeout, timeout)
end
else -- Cancel running timeout when there is no next deadline
if self.next_timeout then
event.cancel(self.next_timeout)
self.next_timeout = nil
end
end
end,
wrap = function (self, f)
self.cq:wrap(f)
end,
loop = function (self)
self.on_step = function () self:work() end
self.event_fd = event.socket(self.cq:pollfd(), self.on_step)
end,
close = function (self)
if self.event_fd then
event.cancel(self.event_fd)
self.event_fd = nil
end
end,
}
-- Implement the coroutine worker with cqueues
local function worker_new (name)
return setmetatable({name = name, cq = cqueues.new()}, { __index = asynchronous_worker_mt })
end
-- Create a default background worker
worker.bg_worker = worker_new('worker.background')
worker.bg_worker:loop()
-- Wrap a function for asynchronous execution
function worker.coroutine (f)
worker.bg_worker:wrap(f)
end
else
-- Disable asynchronous execution
local function disabled ()
error('Lua library cqueues is required for asynchronous execution (LuaJIT requires the library built for Lua 5.1)')
end
worker.sleep = disabled
worker.map = disabled
worker.coroutine = disabled
worker.bg_worker = setmetatable({}, { __index = disabled })
end
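-- Illustrative use (a sketch; mirrors the integration test's usage):
--   worker.coroutine(function ()
--       worker.sleep(3600)  -- yields via cqueues; queries and events keep being serviced
--   end)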
-- Global commands for map()
-- must be public because it is called from eval_cmd()
-- when map() commands are read from control socket
function _map_luaobj_call_wrapper(cmd)
local func = eval_cmd_compile(cmd, "RAW")
local ret = kluautil.kr_table_pack(xpcall(func, debug.traceback))
local ok, serial = pcall(krprint.serialize_lua, ret, 'error')
if not ok then
log_error(ffi.C.LOG_GRP_SYSTEM, 'failed to serialize map() response %s (%s)',
table_print(ret), serial)
return krprint.serialize_lua(
kluautil.kr_table_pack(false, "returned values cannot be serialized: "
.. serial))
else
return serial
end
end
local function _sock_errmsg(path, desc)
return string.format(
'map() error while communicating with %s: %s',
path, desc)
end
local function _sock_check(sock, call, params, path, desc)
local errprefix = _sock_errmsg(path, desc) .. ': '
local retvals = kluautil.kr_table_pack(pcall(call, unpack(params)))
local ok = retvals[1]
if not ok then
error(errprefix .. tostring(retvals[2]))
end
local rerr, werr = sock:error()
if rerr or werr then
error(string.format('%sread error %s; write error %s', errprefix, rerr, werr))
end
if retvals[2] == nil then
error(errprefix .. 'unexpected nil result')
end
return unpack(retvals, 2, retvals.n)
end
local function _sock_assert(condition, path, desc)
if not condition then
error(_sock_errmsg(path, desc))
end
end
local function map_send_recv(cmd, path)
local bit = require('bit')
local socket = require('cqueues.socket')
local s = socket.connect({ path = path })
s:setmaxerrs(0)
s:setmode('bn', 'bn')
local status, err = pcall(s.connect, s)
if not status then
log_error(ffi.C.LOG_GRP_NETWORK, 'map() error while connecting to control socket %s: '
.. '%s (ignoring this socket)', path, err)
return nil
end
local ret = _sock_check(s, s.write, {s, '__binary\n'}, path,
'write __binary')
_sock_assert(ret, path,
'write __binary result')
local recv = _sock_check(s, s.read, {s, 2}, path,
'read reply to __binary')
_sock_assert(recv and recv == '> ', path,
'unexpected reply to __binary')
_sock_check(s, s.write, {s, cmd..'\n'}, path,
'command write')
recv = _sock_check(s, s.read, {s, 4}, path,
'response length read')
_sock_assert(recv and #recv == 4, path,
'length of response length preamble does not match')
local len = tonumber(recv:byte(1))
for i=2,4 do
len = bit.bor(bit.lshift(len, 8), tonumber(recv:byte(i)))
end
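-- The loop above decodes the 4-byte preamble as a big-endian uint32;
-- e.g. the bytes 0x00 0x00 0x01 0x2c decode to len == 300.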
ret = _sock_check(s, s.read, {s, len}, path,
'read response')
_sock_assert(ret and #ret == len, path,
'actual response length does not match length in preamble')
s:close()
return ret
end
-- internal use only
-- Call cmd on each instance via control sockets.
-- @param format - "luaobj" if individual results should be Lua objects
-- - "strings" for eval_cmd output for each instance
-- @returns table with results, one item per instance + key n=number of instances
-- (order of return values is undefined)
-- @throws Lua error if:
-- - communication failed in the middle of transaction
-- - a result is not serializable
-- - individual call throws an error
-- - number of return values != 1 per instance per call
-- - cmd execution state is undefined after an error
-- Connection errors at the beginning are ignored to paper over leftover dead sockets.
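-- Illustrative calls (commands and values are examples only):
--   map('worker.pid')               -- e.g. { 12345, 12346, 12347, n = 3 } with the 'luaobj' format
--   map('cache.count()', 'strings') -- eval_cmd() output of each instance, as strings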
function map(cmd, format)
local local_sockets = {}
local results = {}
if (type(cmd) ~= 'string') then
panic('map() command must be a string') end
if string.find(cmd, '\n', 1, true) then
panic('map() command cannot contain literal \\n, escape it with \\010') end
if (#cmd <= 0) then
panic('map() command must be non-empty') end
-- syntax check on input command to detect typos early
local chunk, err = eval_cmd_compile(cmd, "LUA_TABLE")
if not chunk then
panic('failure when compiling map() command: %s', err)
end
format = format or 'luaobj'
if (format ~= 'luaobj' and format ~= 'strings') then
panic('map() output format must be luaobj or strings') end
if format == 'luaobj' then
cmd = '_map_luaobj_call_wrapper([=====[' .. cmd .. ']=====])'
end
-- find out control socket paths
for _,v in pairs(net.list()) do
if (v['kind'] == 'control') and (v['transport']['family'] == 'unix') then
table.insert(local_sockets, string.match(v['transport']['path'], '^.*/([^/]+)$'))
end
end
local filetab = kluautil.list_dir(worker.control_path)
if next(filetab) == nil then
panic('no control sockets found in directory %s',
worker.control_path)
end
local result_count = 0
-- finally execute it on all instances
for _, file in ipairs(filetab) do
local local_exec = false
for _, lsoc in ipairs(local_sockets) do
if file == lsoc then
local_exec = true
end
end
local path = worker.control_path..file
local path_name = (local_exec and 'this instance') or path
log_info(ffi.C.LOG_GRP_SYSTEM, 'executing map() on %s: command %s', path_name, cmd)
local ret
if local_exec then
ret = eval_cmd(cmd, "LUA_TABLE")
else
ret = map_send_recv(cmd, path)
-- skip dead sockets (leftovers from dead instances)
if ret == nil then
goto continue
end
end
result_count = result_count + 1
-- return value is output from eval_cmd
-- i.e. string including "quotes" and Lua escaping in between
assert(type(ret) == 'string', 'map() protocol error, '
.. 'string not returned by follower')
assert(#ret >= 2 and
string.sub(ret, 1, 1) == "'"
and string.sub(ret, -1, -1) == "'",
'map() protocol error, value returned by follower does '
.. 'not look like a string')
-- deserialize string: remove "quotes" and de-escape bytes
ret = krprint.deserialize_lua(ret)
if format == 'luaobj' then
-- ret should be table with xpcall results serialized into string
ret = krprint.deserialize_lua(ret)
assert(type(ret) == 'table', 'map() protocol error, '
.. 'table with results not returned by follower')
if (ret.n ~= 2) then
log_error(ffi.C.LOG_GRP_SYSTEM, 'got unsupported map() response: %s', table_print(ret))
panic('unexpected number of return values in map() response: '
.. 'only single return value is allowed, '
.. 'use kluautil.kr_table_pack() helper')
end
local ok, retval = ret[1], ret[2]
if ok == false then
panic('error when executing map() command on control socket %s: '
.. '%s. command execution state is now undefined!',
path, retval)
end
-- drop wrapper table and return only the actual return value
ret = retval
end
results[result_count] = ret
::continue::
end
results.n = result_count
return results
end
-- Fetch over HTTPS with the peer cert checked
local function https_fetch(url, ca)
local ssl_ok, https = pcall(require, 'ssl.https')
local ltn_ok, ltn12 = pcall(require, 'ltn12')
if not ssl_ok or not ltn_ok then
return nil, 'luasec and luasocket needed for root TA bootstrap'
end
local resp = {}
local r, c, h, s = https.request{
url = url,
cafile = ca,
verify = {'peer', 'fail_if_no_peer_cert' },
protocol = 'tlsv1_2',
sink = ltn12.sink.table(resp),
}
if r == nil then return r, c end
return resp[1]
end
-- Fetch root anchors in XML over HTTPS
local function bootstrap(url, ca)
-- @todo The ICANN certificate is verified against the current CA;
-- this is not ideal, as we should rather verify the .xml signature, which
-- is signed by ICANN's long-lived cert, but luasec has no PKCS7 support
ca = ca or etcdir..'/icann-ca.pem'
url = url or 'https://data.iana.org/root-anchors/root-anchors.xml'
local xml, err = https_fetch(url, ca)
if not xml then
return false, string.format('[ ta ] fetch of "%s" failed: %s', url, err)
end
-- Parse root trust anchor
local fields = {}
string.gsub(xml, "<([%w]+).->([^<]+)</[%w]+>", function (k, v) fields[k] = v end)
local rrdata = string.format('%s %s %s %s', fields.KeyDigest, fields.Algorithm, fields.DigestType, fields.Digest)
local rr = string.format('%s 0 IN DS %s', fields.TrustAnchor, rrdata)
-- Add to key set, create an empty keyset file to be filled
print('[ ta ] warning: root anchor bootstrapped, you SHOULD check the key manually, see: '..
'https://data.iana.org/root-anchors/draft-icann-dnssec-trust-anchor.html#sigs')
return rr
end
-- Load the module (check for FFI)
local ffi_ok, ffi = pcall(require, 'ffi')
if not ffi_ok then
-- Simplified TA management, no RFC5011 automatics
return {
-- Reuse Lua/C global function
add = trustanchor,
-- Simplified trust anchor management
config = function (path)
if not path then return end
if not io.open(path, 'r') then
local rr, err = bootstrap()
if not rr then print(err) return false end
local keyfile = assert(io.open(path, 'w'))
keyfile:write(rr..'\n')
end
for line in io.lines(path) do
trustanchor(line)
end
end,
-- Disabled
set_insecure = function () error('[ ta ] FFI not available, this function is disabled') end,
}
end
local kres = require('kres')
local C = ffi.C
-- RFC5011 state table
local key_state = {
Start = 'Start', AddPend = 'AddPend', Valid = 'Valid',
Missing = 'Missing', Revoked = 'Revoked', Removed = 'Removed'
}
-- Find key in current keyset
local function ta_find(keyset, rr)
for i, ta in ipairs(keyset) do
-- Match key owner and content
if ta.owner == rr.owner then
if ta.type == rr.type then
if rr.type == kres.type.DNSKEY then
if C.kr_dnssec_key_match(ta.rdata, #ta.rdata, rr.rdata, #rr.rdata) == 0 then
return ta
end
elseif rr.type == kres.type.DS and ta.rdata == rr.rdata then
return ta
end
-- DNSKEY superseding DS, inexact match
elseif rr.type == kres.type.DNSKEY and ta.type == kres.type.DS then
if ta.key_tag == C.kr_dnssec_key_tag(rr.type, rr.rdata, #rr.rdata) then
keyset[i] = rr -- Replace current DS
rr.state = ta.state
rr.key_tag = ta.key_tag
return rr
end
-- DS key matching DNSKEY, inexact match
elseif rr.type == kres.type.DS and ta.type == kres.type.DNSKEY then
local ds_tag = C.kr_dnssec_key_tag(rr.type, rr.rdata, #rr.rdata)
local dnskey_tag = C.kr_dnssec_key_tag(ta.type, ta.rdata, #ta.rdata)
if ds_tag == dnskey_tag then
return ta
end
end
end
end
return nil
end
-- Evaluate TA status according to RFC5011
local function ta_present(keyset, rr, hold_down_time, force)
if rr.type == kres.type.DNSKEY and not C.kr_dnssec_key_ksk(rr.rdata) then
return false -- Ignore
end
-- Find the key in current key set and check its status
local now = os.time()
local key_revoked = (rr.type == kres.type.DNSKEY) and C.kr_dnssec_key_revoked(rr.rdata)
local key_tag = C.kr_dnssec_key_tag(rr.type, rr.rdata, #rr.rdata)
local ta = ta_find(keyset, rr)
if ta then
-- Key reappears (KeyPres)
if ta.state == key_state.Missing then
ta.state = key_state.Valid
ta.timer = nil
end
-- Key is revoked (RevBit)
if ta.state == key_state.Valid or ta.state == key_state.Missing then
if key_revoked then
ta.state = key_state.Revoked
ta.timer = os.time() + hold_down_time
end
end
-- Remove hold-down timer expires (RemTime)
if ta.state == key_state.Revoked and os.difftime(ta.timer, now) <= 0 then
ta.state = key_state.Removed
ta.timer = nil
end
-- Add hold-down timer expires (AddTime)
if ta.state == key_state.AddPend and os.difftime(ta.timer, now) <= 0 then
ta.state = key_state.Valid
ta.timer = nil
end
if rr.state ~= key_state.Valid or verbose() then
print('[ ta ] key: '..key_tag..' state: '..ta.state)
end
return true
elseif not key_revoked then -- First time seen (NewKey)
rr.key_tag = key_tag
if force then
rr.state = key_state.Valid
else
rr.state = key_state.AddPend
rr.timer = now + hold_down_time
end
if rr.state ~= key_state.Valid or verbose() then
print('[ ta ] key: '..key_tag..' state: '..rr.state)
end
table.insert(keyset, rr)
return true
end
return false
end
-- TA is missing in the new key set
local function ta_missing(ta, hold_down_time)
-- Key is removed (KeyRem)
local keep_ta = true
local key_tag = C.kr_dnssec_key_tag(ta.type, ta.rdata, #ta.rdata)
if ta.state == key_state.Valid then
ta.state = key_state.Missing
ta.timer = os.time() + hold_down_time
-- Purge pending key
elseif ta.state == key_state.AddPend then
print('[ ta ] key: '..key_tag..' purging')
keep_ta = false
end
print('[ ta ] key: '..key_tag..' state: '..ta.state)
return keep_ta
end
-- Plan refresh event and re-schedule itself based on the result of the callback
local function refresh_plan(trust_anchors, timeout, refresh_cb, priming, bootstrap)
trust_anchors.refresh_ev = event.after(timeout, function (ev)
resolve('.', kres.type.DNSKEY, kres.class.IN, kres.query.NO_CACHE,
function (pkt)
-- Schedule itself with updated timeout
local next_time = refresh_cb(trust_anchors, kres.pkt_t(pkt), bootstrap)
if trust_anchors.refresh_time ~= nil then
next_time = trust_anchors.refresh_time
end
print('[ ta ] next refresh: '..next_time)
refresh_plan(trust_anchors, next_time, refresh_cb)
-- Priming query, prime root NS next
if priming ~= nil then
resolve('.', kres.type.NS, kres.class.IN)
end
end)
end)
end
-- Active refresh, return time of the next check
local function active_refresh(trust_anchors, pkt, bootstrap)
local retry = true
if pkt:rcode() == kres.rcode.NOERROR then
local records = pkt:section(kres.section.ANSWER)
local keyset = {}
for i, rr in ipairs(records) do
if rr.type == kres.type.DNSKEY then
table.insert(keyset, rr)
end
end
trust_anchors.update(keyset, bootstrap)
retry = false
end
-- Calculate refresh/retry timer (RFC 5011, 2.3)
local min_ttl = retry and day or 15 * day
for i, rr in ipairs(trust_anchors.keyset) do -- 10% (on retry) or 50% of the original TTL
min_ttl = math.min(min_ttl, (retry and 100 or 500) * rr.ttl) -- rr.ttl is in seconds, timers in milliseconds
end
return math.max(hour, min_ttl)
end
-- Write keyset to a file
local function keyset_write(keyset, path)
local file = assert(io.open(path..'.lock', 'w'))
for i = 1, #keyset do
local ta = keyset[i]
local rr_str = string.format('%s ; %s\n', kres.rr2str(ta), ta.state)
if ta.state ~= key_state.Valid and ta.state ~= key_state.Missing then
rr_str = '; '..rr_str -- Invalidate key string
end
file:write(rr_str)
end
file:close()
os.rename(path..'.lock', path)
end
-- TA store management
local trust_anchors = {
keyset = {},
insecure = {},
hold_down_time = 30 * day,
keep_removed = 0,
-- Update existing keyset
update = function (new_keys, initial)
if not new_keys then return false end
-- Filter TAs to be purged from the keyset (KeyRem)
local hold_down = trust_anchors.hold_down_time / 1000
local keyset = {}
local keep_removed = trust_anchors.keep_removed
for i, ta in ipairs(trust_anchors.keyset) do
local keep = true
if not ta_find(new_keys, ta) then
keep = ta_missing(ta, hold_down)
end
-- Purge removed keys
if ta.state == key_state.Removed then
if keep_removed > 0 then
keep_removed = keep_removed - 1
else
keep = false
end
end
if keep then
table.insert(keyset, ta)
end
end
-- Evaluate new TAs
for i, rr in ipairs(new_keys) do
if (rr.type == kres.type.DNSKEY or rr.type == kres.type.DS) and rr.rdata ~= nil then
ta_present(keyset, rr, hold_down, initial)
end
end
-- Publish active TAs
local store = kres.context().trust_anchors
C.kr_ta_clear(store)
if next(keyset) == nil then return false end
for i, ta in ipairs(keyset) do
-- Key MAY be used as a TA only in these two states (RFC5011, 4.2)
if ta.state == key_state.Valid or ta.state == key_state.Missing then
C.kr_ta_add(store, ta.owner, ta.type, ta.ttl, ta.rdata, #ta.rdata)
end
end
trust_anchors.keyset = keyset
-- Store keyset in the file
if trust_anchors.file_current ~= nil then
keyset_write(keyset, trust_anchors.file_current)
end
return true
end,
-- Load keys from a file (managed)
config = function (path, unmanaged)
-- Bootstrap if requested and keyfile doesn't exist
if trust_anchors.refresh_ev ~= nil then event.cancel(trust_anchors.refresh_ev) end
if not io.open(path, 'r') then
local rr, msg = bootstrap()
if not rr then
error('you MUST obtain the root TA manually, see: '..
'https://knot-resolver.readthedocs.io/en/latest/daemon.html#enabling-dnssec')
end
trustanchor(rr)
-- Fetch DNSKEY immediately
trust_anchors.file_current = path
refresh_plan(trust_anchors, 0, active_refresh, true, true)
return
elseif path == trust_anchors.file_current then
return
end
-- Parse new keys, refresh eventually
local new_keys = require('zonefile').file(path)
if unmanaged then
trust_anchors.file_current = nil
else
trust_anchors.file_current = path
end
trust_anchors.keyset = {}
if trust_anchors.update(new_keys, true) then
refresh_plan(trust_anchors, 10 * sec, active_refresh, true, false)
end
end,
-- Add DS/DNSKEY record(s) (unmanaged)
add = function (keystr)
return trustanchor(keystr)
end,
-- Negative TA management
set_insecure = function (list)
local store = kres.context().negative_anchors
C.kr_ta_clear(store)
for i = 1, #list do
local dname = kres.str2dname(list[i])
C.kr_ta_add(store, dname, kres.type.DS, 0, nil, 0)
end
trust_anchors.insecure = list
end,
}
return trust_anchors
-- SPDX-License-Identifier: GPL-3.0-or-later
-- Load the module
local ffi = require 'ffi'
local kres = require('kres')
local C = ffi.C
local trust_anchors -- the public pseudo-module, exported as global variable
-- RFC5011 state table
local key_state = {
Start = 'Start', AddPend = 'AddPend', Valid = 'Valid',
Missing = 'Missing', Revoked = 'Revoked', Removed = 'Removed'
}
local function upgrade_required(msg)
if msg then
msg = msg .. '\n'
else
msg = ''
end
panic('Configuration upgrade required: ' .. msg .. 'Please refer to ' ..
'https://www.knot-resolver.cz/documentation/latest/upgrading.html')
end
-- TODO: Move bootstrap to a separate module or even its own binary
-- remove UTC timezone specification if present or throw error
local function time2utc(orig_timespec)
local patterns = {'[+-]00:00$', 'Z$'}
for _, pattern in ipairs(patterns) do
local timespec, removals = string.gsub(orig_timespec, pattern, '')
if removals == 1 then
return timespec
end
end
error(string.format('unsupported time specification: %s', orig_timespec))
end
local function keydigest_is_valid(valid_from, valid_until)
local format = '%Y-%m-%dT%H:%M:%S'
local time_now = os.date('!%Y-%m-%dT%H:%M:%S') -- ! forces UTC
local time_diff = ffi.new('double[1]')
local err = ffi.C.kr_strptime_diff(
format, time_now, time2utc(valid_from), time_diff)
if (err ~= nil) then
error(string.format('failed to process "validFrom" constraint: %s',
ffi.string(err)))
end
local from_ok = time_diff[0] > 0
-- optional attribute
local until_ok = true
if valid_until then
err = ffi.C.kr_strptime_diff(
format, time_now, time2utc(valid_until), time_diff)
if (err ~= nil) then
error(string.format('failed to process "validUntil" constraint: %s',
ffi.string(err)))
end
until_ok = time_diff[0] < 0
end
return from_ok and until_ok
end
local function parse_xml_keydigest(attrs, inside, output)
local fields = {}
local _, n = string.gsub(attrs, "([%w]+)=\"([^\"]*)\"", function (k, v) fields[k] = v end)
assert(n >= 1,
string.format('cannot parse XML attributes from "%s"', attrs))
assert(fields['validFrom'],
string.format('mandatory KeyDigest XML attribute validFrom ' ..
'not found in "%s"', attrs))
local valid_attrs = {id = true, validFrom = true, validUntil = true}
for key, _ in pairs(fields) do
assert(valid_attrs[key],
string.format('unsupported KeyDigest attribute "%s" found in "%s"',
key, attrs))
end
_, n = string.gsub(inside, "<([%w]+).->([^<]+)</[%w]+>", function (k, v) fields[k] = v end)
assert(n >= 1,
string.format('error parsing KeyDigest XML elements from "%s"',
inside))
local mandatory_elements = {'KeyTag', 'Algorithm', 'DigestType', 'Digest'}
for _, key in ipairs(mandatory_elements) do
assert(fields[key],
string.format('mandatory element %s is missing in "%s"',
key, inside))
end
assert(n == 4, string.format('found %d elements but expected 4 in %s', n, inside))
table.insert(output, fields) -- append to list of parsed keydigests
end
local function generate_ds(keydigests)
local rrset = ''
for _, fields in ipairs(keydigests) do
local rr = string.format(
'. 0 IN DS %s %s %s %s',
fields.KeyTag, fields.Algorithm, fields.DigestType, fields.Digest)
if keydigest_is_valid(fields['validFrom'], fields['validUntil']) then
rrset = rrset .. '\n' .. rr
else
log_info(ffi.C.LOG_GRP_TA, 'skipping trust anchor "%s" ' ..
'because it is outside of validity range', rr)
end
end
return rrset
end
local function assert_str_match(str, pattern, expected)
local count = 0
for _ in string.gmatch(str, pattern) do
count = count + 1
end
assert(count == expected,
string.format('expected %d occurrences of "%s" but got %d in "%s"',
expected, pattern, count, str))
end
-- Fetch root anchors in XML over HTTPS, returning a zone-file-style string
-- or false in case of error, and a message.
local function bootstrap(url, ca)
local kluautil = require('kluautil')
local file = io.tmpfile()
-- RFC 7958, sec. 2, but we don't do precise XML parsing.
-- @todo the ICANN certificate is verified against the current CA bundle;
-- this is not ideal, as we should rather verify the .xml signature, which
-- is signed by ICANN's long-lived certificate, but luasec has no PKCS7 support
local rcode, errmsg = kluautil.kr_https_fetch(url, file, ca)
if rcode == nil then
file:close()
return false, string.format('[ ta ] fetch of "%s" failed: %s', url, errmsg)
end
local xml = file:read("*a")
file:close()
-- we support only a minimal subset of https://tools.ietf.org/html/rfc7958
assert_str_match(xml, '<?xml version="1%.0" encoding="UTF%-8"%?>', 1)
assert_str_match(xml, '<TrustAnchor ', 1)
assert_str_match(xml, '<Zone>.</Zone>', 1)
assert_str_match(xml, '</TrustAnchor>', 1)
-- Parse root trust anchor, one digest at a time, converting to a zone-file-style string.
local keydigests = {}
string.gsub(xml, "<KeyDigest([^>]*)>(.-)</KeyDigest>", function(attrs, inside)
parse_xml_keydigest(attrs, inside, keydigests)
end)
local rrset = generate_ds(keydigests)
if rrset == '' then
return false, string.format('[ ta ] no valid trust anchors found at "%s"', url)
end
local msg = '[ ta ] Root trust anchors bootstrapped over https with pinned certificate.\n'
.. ' You SHOULD verify them manually against original source:\n'
.. ' https://www.iana.org/dnssec/files\n'
.. '[ ta ] Bootstrapped root trust anchors are:'
.. rrset
return rrset, msg
end
local function bootstrap_write(rrstr, filename)
local fname_tmp = filename .. '.lock.' .. tostring(worker.pid);
local file = assert(io.open(fname_tmp, 'w'))
file:write(rrstr)
file:close()
assert(os.rename(fname_tmp, filename))
end
-- Bootstrap end
-- Update ta.comment and return decorated line representing the RR
-- This is meant to be in zone-file format.
local function ta_rr_str(ta)
ta.comment = ' ' .. ta.state .. ':' .. (ta.timer or '')
.. ' ; KeyTag:' .. ta.key_tag -- the tag is just for humans
local rr_str = kres.rr2str(ta) .. '\n'
if ta.state ~= key_state.Valid and ta.state ~= key_state.Missing then
rr_str = '; '..rr_str -- Invalidate key string (for older kresd versions)
end
return rr_str
end
-- Write keyset to a file. States and timers are stored in comments.
local function keyset_write(keyset)
if not keyset.managed then -- not to be persistent, this is an error!
panic('internal error: keyset_write called for an unmanaged TA')
end
local fname_tmp = keyset.filename .. '.lock.' .. tostring(worker.pid);
local file = assert(io.open(fname_tmp, 'w'))
for i = 1, #keyset do
file:write(ta_rr_str(keyset[i]))
end
file:close()
assert(os.rename(fname_tmp, keyset.filename))
end
-- Search the values of a table and return the corresponding key (or nil).
local function table_search(t, val)
for k, v in pairs(t) do
if v == val then
return k
end
end
return nil
end
-- For each RR, parse .state and .timer from .comment.
local function keyset_parse_comments(tas, default_state)
for _, ta in pairs(tas) do
ta.state = default_state
if ta.comment then
string.gsub(ta.comment, '^%s*(%a+):(%d*)', function (state, time)
if table_search(key_state, state) then
ta.state = state
end
ta.timer = tonumber(time) -- nil on failure
end)
ta.comment = nil
end
end
return tas
end
-- Read keyset from a file xor a string. (This includes the key states and timers.)
local function keyset_read(path, str)
if (path == nil) == (str == nil) then -- exactly one of them must be nil
return nil, "internal ERROR: incorrect call to TA's keyset_read"
end
-- First load the regular entries, trusting them.
local zonefile = require('zonefile')
local tas, err
if path ~= nil then
tas, err = zonefile.file(path)
else
tas, err = zonefile.string(str)
end
if not tas then
return tas, err
end
keyset_parse_comments(tas, key_state.Valid)
-- The untrusted keys are commented out but important to load.
local line_iter
if path ~= nil then
line_iter = io.lines(path)
else
line_iter = string.gmatch(str, "[^\n]+")
end
for line in line_iter do
if line:sub(1, 2) == '; ' then
-- Ignore the line unless it parses and contains a recognized .state.
local l_set = zonefile.string(line:sub(3))
if l_set and l_set[1] then
keyset_parse_comments(l_set)
if l_set[1].state then
table.insert(tas, l_set[1])
end
end
end
end
-- Fill tas[*].key_tag
for _, ta in pairs(tas) do
local ta_keytag = C.kr_dnssec_key_tag(ta.type, ta.rdata, #ta.rdata)
if not (ta_keytag >= 0 and ta_keytag <= 65535) then
return nil, string.format('invalid key: "%s": %s',
kres.rr2str(ta), ffi.string(C.knot_strerror(ta_keytag)))
end
ta.key_tag = ta_keytag
end
-- Fill tas.owner
if not tas[1] then
return nil, "empty TA set"
end
local owner = tas[1].owner
for _, ta in ipairs(tas) do
if ta.owner ~= owner then
return nil, string.format("do not mix %s and %s TAs in single file/string",
kres.dname2str(ta.owner), kres.dname2str(owner))
end
end
tas.owner = owner
return tas
end
-- Replace current TAs for given owner by the "trusted" ones from passed keyset.
-- Return true iff no TA errored out and at least one is in VALID state.
local function keyset_publish(keyset)
local store = kres.context().trust_anchors
local count = 0
local has_error = false
C.kr_ta_del(store, keyset.owner)
for _, ta in ipairs(keyset) do
-- Key MAY be used as a TA only in these two states (RFC5011, 4.2)
if ta.state == key_state.Valid or ta.state == key_state.Missing then
if C.kr_ta_add(store, ta.owner, ta.type, ta.ttl, ta.rdata, #ta.rdata) == 0 then
count = count + 1
else
ta.state = 'ERROR'
has_error = true
end
end
end
if count == 0 then
log_error(ffi.C.LOG_GRP_TA, 'ERROR: no anchors are trusted for ' ..
kres.dname2str(keyset.owner) .. ' !')
end
return count > 0 and not has_error
end
local function add_file(path, unmanaged)
local managed = not unmanaged
if not ta_update then
modules.load('ta_update')
end
if managed then
if not io.open(path .. '.lock', 'w') then
error("[ ta ] ERROR: write access needed to keyfile dir '"..path.."'")
end
os.remove(path .. ".lock")
end
-- Bootstrap TA for root zone if keyfile doesn't exist
if managed and not io.open(path, 'r') then
if trust_anchors.keysets['\0'] then
error(string.format(
"[ ta ] keyfile '%s' doesn't exist and root key is already installed, "
.. "cannot bootstrap; provide a path to valid file with keys", path))
end
log_info(ffi.C.LOG_GRP_TA, "keyfile '%s': doesn't exist, bootstrapping", path);
local rrstr, msg = bootstrap(trust_anchors.bootstrap_url, trust_anchors.bootstrap_ca)
if not rrstr then
msg = msg .. '\n'
.. '[ ta ] Failed to bootstrap root trust anchors!'
error(msg)
end
print(msg)
bootstrap_write(rrstr, path)
-- continue as if the keyfile was there
end
-- Parse the file and check its sanity
local keyset, err = keyset_read(path)
if not keyset then
panic("[ ta ] ERROR: failed to read anchors from '%s' (%s)", path, err)
end
keyset.filename = path
keyset.managed = managed
local owner = keyset.owner
local owner_str = kres.dname2str(owner)
local keyset_orig = trust_anchors.keysets[owner]
if keyset_orig then
log_warn(ffi.C.LOG_GRP_TA, 'warning: overriding previously set trust anchors for ' .. owner_str)
if keyset_orig.managed and ta_update then
ta_update.stop(owner)
end
end
trust_anchors.keysets[owner] = keyset
-- Replace the TA store used for validation
if keyset_publish(keyset) then
log_info(ffi.C.LOG_GRP_TA, 'installed trust anchors for domain ' .. owner_str .. ' are:\n'
.. trust_anchors.summary(owner))
end
-- TODO: if failed and for root, try to rebootstrap?
ta_update.start(owner, managed)
end
local function remove(zname)
local owner = kres.str2dname(zname)
if not trust_anchors.keysets[owner] then
return false
end
if ta_update then
ta_update.stop(owner)
end
trust_anchors.keysets[owner] = nil
local store = kres.context().trust_anchors
C.kr_ta_del(store, owner)
return true
end
local function ta_str(owner)
local owner_str = kres.dname2str(owner) .. ' '
local msg = ''
for _, nta in pairs(trust_anchors.insecure) do
if owner == kres.str2dname(nta) then
msg = owner_str .. 'is negative trust anchor\n'
end
end
if not trust_anchors.keysets[owner] then
if #msg > 0 then -- it is normal for an NTA not to have an explicit TA
return msg
else
return owner_str .. 'has no explicit trust anchors\n'
end
end
if #msg > 0 then
msg = msg .. 'WARNING! negative trust anchor also has an explicit TA\n'
end
for _, ta in ipairs(trust_anchors.keysets[owner]) do
msg = msg .. ta_rr_str(ta)
end
return msg
end
-- TA store management, for user docs see ../README.rst
trust_anchors = {
-- [internal] table indexed by dname;
-- each item is a list of RRs and additionally contains:
-- - owner - that dname (for simplicity)
-- - [optional] filename in which to persist the state,
-- implying unmanaged TA if nil
-- The RR tables also contain some additional TA-specific fields.
keysets = {},
-- Documented properties:
insecure = {},
bootstrap_url = 'https://data.iana.org/root-anchors/root-anchors.xml',
bootstrap_ca = '@etc_dir@/icann-ca.pem',
-- Load keys from a file, 5011-managed by default.
-- If managed and the file doesn't exist, try bootstrapping the root into it.
add_file = add_file,
config = function() upgrade_required('trust_anchors.config was removed, use trust_anchors.add_file()') end,
remove = remove,
keyset_publish = keyset_publish,
keyset_write = keyset_write,
key_state = key_state,
-- Add DS/DNSKEY record(s) (unmanaged)
add = function (keystr)
local keyset, err = keyset_read(nil, keystr)
if keyset ~= nil then
local owner = keyset.owner
local owner_str = kres.dname2str(owner)
local keyset_orig = trust_anchors.keysets[owner]
-- Set up trust_anchors.keysets[owner]
if keyset_orig then
if keyset_orig.managed then
panic('[ ta ] it is impossible to add an unmanaged TA for zone '
.. owner_str .. ' which already has a managed TA')
end
log_warn(ffi.C.LOG_GRP_TA, 'warning: extending previously set trust anchors for '
.. owner_str)
for _, ta in ipairs(keyset) do
table.insert(keyset_orig, ta)
end
end
-- Replace the TA store used for validation
if not keyset_publish(keyset) then
err = "when publishing the TA set"
-- trust_anchors.keysets[owner] was already updated to the
-- (partially) failing state, but I'm not sure how much to improve this
end
keyset.managed = false
trust_anchors.keysets[owner] = keyset
end
log_info(ffi.C.LOG_GRP_TA, 'New TA state:\n' .. trust_anchors.summary())
if err then
panic('[ ta ] .add() failed: ' .. err)
end
end,
-- Negative TA management
set_insecure = function (list)
assert(type(list) == 'table', 'parameter must be list of domain names (e.g. {"a.test", "b.example"})')
local store = kres.context().negative_anchors
for i = 1, #list do
local dname = kres.str2dname(list[i])
if trust_anchors.keysets[dname] then
error('cannot add NTA '..list[i]..' because it is a TA. Use trust_anchors.remove() instead')
end
end
C.kr_ta_clear(store)
for i = 1, #list do
local dname = kres.str2dname(list[i])
C.kr_ta_add(store, dname, kres.type.DS, 0, nil, 0)
end
trust_anchors.insecure = list
end,
-- Return textual representation of all TAs (incl. negative)
-- It's meant for human consumption.
summary = function (single_owner)
if single_owner then -- single domain
return ta_str(single_owner)
end
-- all domains
local msg = ''
local ta_count = 0
local seen = {}
for _, nta_str in pairs(trust_anchors.insecure) do
local owner = kres.str2dname(nta_str)
seen[owner] = true
msg = msg .. ta_str(owner)
end
for owner, _ in pairs(trust_anchors.keysets) do
if not seen[owner] then
ta_count = ta_count + 1
msg = msg .. ta_str(owner)
end
end
if ta_count == 0 then
msg = msg .. 'No valid trust anchors, DNSSEC validation is disabled\n'
end
return msg
end,
}
-- Syntactic sugar for TA store
setmetatable(trust_anchors, {
__newindex = function (t,k,v)
if k == 'file' then
upgrade_required('trust_anchors.file was removed, use trust_anchors.add_file()')
elseif k == 'negative' then
upgrade_required('trust_anchors.negative was removed, use trust_anchors.set_insecure()')
elseif k == 'keyfile_default' then
upgrade_required('trust_anchors.keyfile_default is now compiled in, see trust_anchors.remove()')
else rawset(t, k, v) end
end,
})
return trust_anchors
.. SPDX-License-Identifier: GPL-3.0-or-later
.. warning:: Options in this section are intended only for expert users and
normally should not be needed.
Since version 4.0, **DNSSEC validation is enabled by default**.
If you really need to turn DNSSEC off and are okay with lowering the security of your
system by doing so, add the following snippet to your configuration file.
.. code-block:: lua
-- turns off DNSSEC validation
trust_anchors.remove('.')
The resolver supports DNSSEC including :rfc:`5011` automated DNSSEC TA updates
and :rfc:`7646` negative trust anchors. Depending on your distribution, DNSSEC
trust anchors should be either maintained in accordance with the distro-wide
policy, or automatically maintained by the resolver itself.
In practice this means that you can forget about it and your favorite Linux
distribution will take care of it for you.
The following functions allow you to modify the DNSSEC configuration *if you really have to*:
.. function:: trust_anchors.add_file(keyfile[, readonly = false])
:param string keyfile: path to the file.
:param readonly: if true, do not attempt to update the file.
The file uses the standard zone-file format, though additional information may be persisted in comments.
Either DS or DNSKEY records can be used for TAs.
If the file does not exist, bootstrapping of the *root* TA will be attempted.
If you want to use bootstrapping, install the `lua-http`_ library.
Each file can only contain records for a single domain.
The TAs will be updated according to :rfc:`5011` and persisted in the file (if allowed).
Example output:
.. code-block:: lua
> trust_anchors.add_file('root.key')
[ ta ] new state of trust anchors for a domain:
. 165488 DS 19036 8 2 49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5
nil
[ ta ] key: 19036 state: Valid
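If the keyfile is maintained by some external mechanism (e.g. a package from your distribution), the same call with ``readonly = true`` keeps the resolver from ever writing to it. A minimal sketch, using a hypothetical path:
.. code-block:: lua
-- hypothetical location of an externally managed keyfile; adjust to your system
trust_anchors.add_file('/usr/share/dns/root.key', true)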
.. function:: trust_anchors.remove(zonename)
Remove the specified trust anchor from the trusted key set. Removing the trust anchor for the root zone effectively disables DNSSEC validation (unless you have configured another trust anchor).
.. code-block:: lua
> trust_anchors.remove('.')
true
If you want to disable DNSSEC validation for a particular domain but keep it enabled for the rest of the DNS tree, use :func:`trust_anchors.set_insecure`.
.. envvar:: trust_anchors.hold_down_time = 30 * day
:return: int (default: 30 * day)
Modify the RFC 5011 hold-down timer to the given value. Intended only for testing purposes. Example: ``30 * sec``
.. envvar:: trust_anchors.refresh_time = nil
:return: int (default: nil)
Modify the RFC 5011 refresh timer to the given value (not set by default); this forces trust anchors
to be updated every N seconds instead of relying on RFC 5011 logic and TTLs.
Intended only for testing purposes.
Example: ``10 * sec``
.. envvar:: trust_anchors.keep_removed = 0
:return: int (default: 0)
How many ``Removed`` keys should be held in history (and in the key file) before being purged.
Note: all ``Removed`` keys will be purged from the key file after restarting the process.
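All three testing knobs above are plain Lua assignments in the configuration file; a minimal sketch (values are illustrative and meant for testing only):
.. code-block:: lua
-- testing only, do not use in production
trust_anchors.hold_down_time = 30 * sec -- shorten the RFC 5011 hold-down timer
trust_anchors.refresh_time = 10 * sec   -- re-check trust anchors every 10 seconds
trust_anchors.keep_removed = 2          -- keep two Removed keys in the key file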
.. function:: trust_anchors.set_insecure(nta_set)
:param table nta_set: List of domain names (text format) representing NTAs.
When you use a domain name as a *negative trust anchor* (NTA), DNSSEC validation will be turned off at/below these names.
Each function call replaces the previous NTA set. You can find the currently active set in the ``trust_anchors.insecure`` variable.
If you want to disable DNSSEC validation completely, use the :func:`trust_anchors.remove` function instead.
Example output:
.. code-block:: lua
> trust_anchors.set_insecure({ 'bad.boy', 'example.com' })
> trust_anchors.insecure
[1] => bad.boy
[2] => example.com
.. warning:: If you set an NTA on a name that is not a zone cut,
it may not always affect names that are not separated from the NTA by a zone cut.
.. function:: trust_anchors.add(rr_string)
:param string rr_string: DS/DNSKEY records in presentation format (e.g. ``. 3600 IN DS 19036 8 2 49AAC11...``)
Inserts DS/DNSKEY record(s) into the current keyset. These will not be managed or updated; use this only for testing
or if you have a specific use case for not using a keyfile.
.. note:: Static keys are very error-prone and should not be used in production. Use :func:`trust_anchors.add_file` instead.
Example output:
.. code-block:: lua
> trust_anchors.add('. 3600 IN DS 19036 8 2 49AAC11...')
.. function:: trust_anchors.summary()
Return a string with a summary of the configured DNSSEC trust anchors, including negative TAs.
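The summary is a plain-text, zone-file-style listing (negative trust anchors included), so it can simply be invoked from the interactive console; a minimal usage sketch:
.. code-block:: lua
> trust_anchors.summary()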
.. _lua-http: https://luarocks.org/modules/daurnimator/http
\ No newline at end of file
-- SPDX-License-Identifier: GPL-3.0-or-later
modules.load('ta_update')
-- check prerequisites
local has_http = pcall(require, 'kres_modules.http') and pcall(require, 'http.request')
if not has_http then
-- skip bootstrap tests because the http module is not installed
os.exit(77)
end
local cqueues = require("cqueues")
local socket = require("cqueues.socket")
-- unload modules which are not related to this test
if ta_signal_query then
modules.unload('ta_signal_query')
end
if priming then
modules.unload('priming')
end
if detect_time_skew then
modules.unload('detect_time_skew')
end
-- Self-checks on globals
assert(help() ~= nil)
assert(worker.id ~= nil)
-- Self-checks on facilities
assert(worker.stats() ~= nil)
assert(net.interfaces() ~= nil)
-- Self-checks on loaded stuff
assert(#modules.list() > 0)
-- Self-check timers
ev = event.recurrent(1 * sec, function () return 1 end)
event.cancel(ev)
ev = event.after(0, function () return 1 end)
-- do not attempt to contact outside world using DNS, operate only on cache
net.ipv4 = false
net.ipv6 = false
-- do not listen, test is driven by config code
env.KRESD_NO_LISTEN = true
-- start test webserver
local function start_webserver()
-- srvout = io.popen('luajit webserv.lua')
-- TODO
os.execute('luajit webserv.lua >/dev/null 2>&1 &')
-- assert(srvout, 'failed to start webserver')
end
local function wait_for_webserver()
local starttime = os.time()
local connected, msg = false, nil -- msg keeps the last connection error for the assert below
while not connected and os.difftime(os.time(), starttime) < 10 do
local con = socket.connect("localhost", 8080)
connected, msg = pcall(con.connect, con, 3)
cqueues.sleep (0.3)
end
assert(connected, string.format('unable to connect to web server: %s', msg))
end
local host = 'https://localhost:8080/'
-- avoid interference with configured keyfile_default
trust_anchors.remove('.')
local function test_err_cert()
trust_anchors.bootstrap_ca = 'x509/wrongca.pem'
trust_anchors.bootstrap_url = host .. 'ok1.xml'
boom(trust_anchors.add_file, {'ok1.keys'},
'fake server certificate is detected')
end
local function test_err_xml(testname, testdesc)
return function()
trust_anchors.bootstrap_ca = 'x509/ca.pem'
trust_anchors.bootstrap_url = host .. testname .. '.xml'
boom(trust_anchors.add_file, {testname .. '.keys'}, testdesc)
end
end
-- dumb test: right now it cannot check the content of the keys, because
-- they do not get written until a refresh fetches the DNSKEY from the network
-- (and bypassing the network using policy also bypasses validation,
-- so that would not test anything)
local function test_ok_xml(testname, testdesc)
return function()
trust_anchors.bootstrap_url = host .. testname .. '.xml'
trust_anchors.remove('.')
same(trust_anchors.add_file(testname .. '.keys'), nil, testdesc)
end
end
return {
start_webserver,
wait_for_webserver,
test_err_cert,
test_err_xml('err_attr_extra_attr', 'bogus TA XML with an extra attribute'),
test_err_xml('err_attr_validfrom_invalid', 'bogus TA XML with invalid validFrom value'),
test_err_xml('err_attr_validfrom_missing', 'bogus TA XML without mandatory validFrom attribute'),
test_err_xml('err_elem_extra', 'bogus TA XML with an extra element'),
test_err_xml('err_elem_missing', 'bogus TA XML without mandatory element'),
test_err_xml('err_multi_ta', 'bogus TA XML with multiple TAs'),
test_err_xml('unsupp_nonroot', 'unsupported TA XML for non-root zone'),
test_err_xml('unsupp_xml_v11', 'unsupported TA XML with XML v1.1'),
test_err_xml('ok0_badtimes', 'TA XML with no valid keys'),
test_ok_xml('ok1_expired1', 'TA XML with 1 valid and 1 expired key'),
test_ok_xml('ok1_notyet1', 'TA XML with 1 valid and 1 not yet valid key'),
test_ok_xml('ok1', 'TA XML with 1 valid key'),
test_ok_xml('ok2', 'TA XML with 2 valid keys'),
}
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="FC4A93EC-9F4E-4597-A766-AD6723E4A56E" source="https://localhost/err_attr_extra_attr.xml">
<Zone>.</Zone>
<KeyDigest unknownattr="test" id="Kjqmt7v" validFrom="2010-07-15T00:00:00+00:00" validUntil="2019-01-11T00:00:00+00:00">
<KeyTag>19036</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5</Digest>
</KeyDigest>
<KeyDigest id="Klajeyz" validFrom="2017-02-02T00:00:00+00:00">
<KeyTag>20326</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="ABD668AB-52DF-4A59-80E3-16CE6341BC55" source="https://localhost/err_attr_validfrom_invalid.xml">
<Zone>.</Zone>
<KeyDigest id="Kjqmt7v" validFrom="2010-07-32T00:00:00+00:00" validUntil="2019-01-11T00:00:00+00:00">
<KeyTag>19036</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5</Digest>
</KeyDigest>
<KeyDigest id="Klajeyz" validFrom="2017-02-02T00:00:00+00:00">
<KeyTag>20326</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="3513058C-4041-40CC-AF0A-D3CCD70F962B" source="https://localhost/err_attr_validfrom_missing.xml">
<Zone>.</Zone>
<KeyDigest id="Kjqmt7v" validUntil="2019-01-11T00:00:00+00:00">
<KeyTag>19036</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5</Digest>
</KeyDigest>
<KeyDigest id="Klajeyz" validFrom="2017-02-02T00:00:00+00:00">
<KeyTag>20326</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="B1854D58-1867-4FA7-872F-0099D394114D" source="https://localhost/err_elem_extra.xml">
<Zone>.</Zone>
<KeyDigest id="Kjqmt7v" validFrom="2010-07-15T00:00:00+00:00" validUntil="2019-01-11T00:00:00+00:00">
<KeyTag>19036</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5</Digest>
</KeyDigest>
<KeyDigest id="Klajeyz" validFrom="2017-02-02T00:00:00+00:00">
<KeyTag>20326</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</Digest>
<UnknownElement>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</UnknownElement>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="BB074095-3A42-4B13-9CC1-CFFF644D4D54" source="https://localhost/err_elem_missing.xml">
<Zone>.</Zone>
<KeyDigest id="Kjqmt7v" validFrom="2010-07-15T00:00:00+00:00" validUntil="2019-01-11T00:00:00+00:00">
<KeyTag>19036</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>49AAC11D7B6F6446702E54A1607371607A1A41855200FD2CE1CDDE32F24E8FB5</Digest>
</KeyDigest>
<KeyDigest id="Klajeyz" validFrom="2017-02-02T00:00:00+00:00">
<KeyTag>20326</KeyTag>
<Algorithm>8</Algorithm>
<!-- this element is missing: DigestType>2</DigestType-->
<Digest>E06D44B80B8F1D39A95C0B0D7C65D08458E880409BBC683457104237C7F8EC8D</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="9DCE46E1-FC78-48E1-81B5-94E328790BB5" source="https://localhost/err_multi_ta.xml">
<Zone>.</Zone>
<KeyDigest id="1" validFrom="2000-02-02T00:00:00+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>1111111111111111111111111111111111111111111111111111111111111111</Digest>
</KeyDigest>
</TrustAnchor>
<TrustAnchor id="9DCE46E1-FC78-48E1-81B5-94E328790BB5" source="https://localhost/err_multi_ta.xml">
<Zone>test.</Zone>
<KeyDigest id="2" validFrom="2000-02-02T00:00:00+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>1111111111111111111111111111111111111111111111111111111111111111</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="EDEDAA08-D2A0-421E-81DC-AF11F5A0CDCD" source="https://localhost/ok0_badtimes.xml">
<Zone>.</Zone>
<KeyDigest id="E" validFrom="2000-01-01T00:00:00+00:00" validUntil="2000-01-01T00:00:00+00:00">
<KeyTag>1</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE</Digest>
</KeyDigest>
<KeyDigest id="F" validFrom="2001-01-01T00:00:00+00:00" validUntil="2001-01-01T00:00:00+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="82E6CB77-12DF-4E61-BF49-367FB95A8BAA" source="https://localhost/ok1.xml">
<Zone>.</Zone>
<KeyDigest id="2" validFrom="2000-02-02T00:00:00+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>1111111111111111111111111111111111111111111111111111111111111111</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="68463155-A857-4C7E-BCA6-2F6CC2EAC1BE" source="https://localhost/ok1_expired1.xml">
<Zone>.</Zone>
<KeyDigest id="F" validFrom="1990-01-01T00:00:00+00:00" validUntil="2000-01-01T00:00:00+00:00">
<KeyTag>1</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF</Digest>
</KeyDigest>
<KeyDigest id="1" validFrom="2000-01-01T00:00:00+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>1111111111111111111111111111111111111111111111111111111111111111</Digest>
</KeyDigest>
</TrustAnchor>
<?xml version="1.0" encoding="UTF-8"?>
<TrustAnchor id="507B39D5-049E-467C-9E9A-F5BE597C9DDA" source="https://localhost/ok1_notyet1.xml">
<Zone>.</Zone>
<KeyDigest id="1" validFrom="2010-07-15T00:00:00+00:00">
<KeyTag>1</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>1111111111111111111111111111111111111111111111111111111111111111</Digest>
</KeyDigest>
<KeyDigest id="2" validFrom="2050-12-31T23:59:59+00:00">
<KeyTag>2</KeyTag>
<Algorithm>8</Algorithm>
<DigestType>2</DigestType>
<Digest>FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF</Digest>
</KeyDigest>
</TrustAnchor>