diff --git a/daemon/README.rst b/daemon/README.rst
index b1762f7265c15bef9ba0b455f0c3d9a318c97302..b87e8845c6349589c9f2b3e2152dcb9f23739227 100644
--- a/daemon/README.rst
+++ b/daemon/README.rst
@@ -106,7 +106,7 @@ You can add start and stop processes on runtime based on the load.
 
 .. _daemon-reuseport:
 
-.. note:: On recent Linux supporting ``SO_REUSEPORT`` (since 3.9, backported to RHEL 2.6.32) it is also able to bind to the same endpoint and distribute the load between the forked processes. If the kernel doesn't support it, you can still fork multiple processes on different ports, and do load balancing externally (on firewall or with `dnsdist <http://dnsdist.org/>`_).
+.. note:: On recent Linux kernels supporting ``SO_REUSEPORT`` (since 3.9, backported to the RHEL 2.6.32 kernel), the forked processes can all bind to the same endpoint and the kernel distributes the load between them. If your OS doesn't support it, you can :ref:`use a supervisor <daemon-supervised>` that binds the sockets before starting multiple processes.
 
 Notice the absence of an interactive CLI. You can attach to the consoles for each process; they are in ``rundir/tty/PID``.
 
@@ -122,18 +122,18 @@ Error or debug logs aren't captured, but you can find them in the daemon standar
 This is also a way to enumerate and test running instances; the list of files in ``tty`` corresponds to the list
 of running processes, and you can test a process for liveness by connecting to its UNIX socket.
 
-.. warning:: This is very basic way to orchestrate multi-core deployments and doesn't scale in multi-node clusters. Keep an eye on the prepared ``hive`` module that is going to automate everything from service discovery to deployment and consistent configuration.
+.. _daemon-supervised:
 
 Running supervised
 ==================
 
-Knot Resolver can run under a supervisor to allow for graceful restarts, watchdog process and socket activation. This way the supervisor binds to sockets and lends them to resolver daemon. Thus if the resolver terminates or is killed, the sockets are still active and no queries are dropped.
+Knot Resolver can run under a supervisor to allow for graceful restarts, a watchdog process and socket activation. This way the supervisor binds to the sockets and lends them to the resolver daemon. If the resolver terminates or is killed, the sockets remain open and no queries are dropped.
 
 The watchdog process must notify kresd about active file descriptors, and kresd will automatically determine the socket type and bound address, so it will appear like any other address. There's a tiny supervisor script for convenience, but you should have a look at `real process managers`_.
 
 .. code-block:: bash
 
-   $ python scripts/supervisor.py ./daemon/kresd 127.0.0.1@53
+   $ python scripts/supervisor.py ./daemon/kresd -a 127.0.0.1
    $ [system] interactive mode
    > quit()
    > [2016-03-28 16:06:36.795879] process finished, pid = 99342, status = 0, uptime = 0:00:01.720612
@@ -900,6 +900,47 @@ notifications for daemon.
       end)
       e.cancel(e)
 
+Map over multiple forks
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When the daemon is running in forked mode, each process acts independently. This reduces software complexity and allows runtime scaling, but it also adds operational burden.
+For example, to add a new policy you would have to either put it in the configuration, or execute the command on each process independently. The daemon simplifies this by promoting a process-group leader which is able to execute commands synchronously over all forks.
+
+.. function:: map(expr)
+
+   Run the expression synchronously over all forks; the results are returned as a table ordered by fork. The expression can be any valid Lua expression.
+
+
+   Example:
+
+   .. code-block:: lua
+
+      -- Current instance only
+      hostname()
+      localhost
+      -- Mapped to forks
+      map 'hostname()'
+      [1] => localhost
+      [2] => localhost
+      -- Get worker ID from each fork
+      map 'worker.id'
+      [1] => 0
+      [2] => 1
+      -- Get cache stats from each fork
+      map 'cache.stats()'
+      [1] => {
+          [hit] => 0
+          [delete] => 0
+          [miss] => 0
+          [insert] => 0
+      }
+      [2] => {
+          [hit] => 0
+          [delete] => 0
+          [miss] => 0
+          [insert] => 0
+      }
+
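+   Coming back to the motivating example above, ``map`` lets you apply a one-off change on every fork instead of repeating it per process. A minimal sketch, assuming ``cache.clear()`` is available as described in the cache section:
+
+   .. code-block:: lua
+
+      -- Flush the cache of the current fork only
+      cache.clear()
+      -- Flush the cache on every fork; one result per fork is returned
+      map 'cache.clear()'
+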
 Scripting worker
 ^^^^^^^^^^^^^^^^
 
@@ -915,6 +956,12 @@ specified worker count and process rank.
 
    Return current worker ID (starting from `0` up to `worker.count - 1`)
 
+
+.. envvar:: worker.pid (number)
+
+   Current worker process PID.
+
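+   For example, each fork can report its own identity by combining the worker properties documented in this section (a minimal illustration; ``print`` and ``string.format`` are standard Lua):
+
+   .. code-block:: lua
+
+      -- Print which worker this fork is and under which PID it runs
+      print(string.format('worker %d/%d, pid %d', worker.id, worker.count, worker.pid))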
+
 .. function:: worker.stats()
 
    Return table of statistics.
diff --git a/modules/daf/README.rst b/modules/daf/README.rst
index d654aabda646e994fd2a480517c558b26fcddc97..34026fe0eef5a51888bbf972c18d9cbc6456d8d5 100644
--- a/modules/daf/README.rst
+++ b/modules/daf/README.rst
@@ -72,3 +72,59 @@ If you're not sure what firewall rules are in effect, see ``daf.rules``:
         [info] => qname ~ %w+.facebook.com AND src = 127.0.0.1/8 deny...
         [policy] => function: 0x1a3ede88
     }
+
+Web interface
+^^^^^^^^^^^^^
+
+If you have the :ref:`HTTP/2 <mod-http>` module loaded, the firewall snippet is loaded into the web interface automatically.
+You can create, track, suspend and remove firewall rules from the web interface.
+
+RESTful interface
+^^^^^^^^^^^^^^^^^
+
+The module also exports a RESTful API for operations over rule chains.
+
+
+.. csv-table::
+    :header: "URL", "HTTP Verb", "Action"
+
+    "/daf", "GET", "Return JSON list of active rules."
+    "/daf", "POST", "Insert new rule, rule string is expected in body. Returns rule information in JSON."
+    "/daf/<id>", "GET", "Retrieve a rule matching given ID."
+    "/daf/<id>", "DELETE", "Delete a rule matching given ID."
+    "/daf/<id>/<prop>/<val>", "PATCH", "Modify given rule, for example /daf/3/active/false suspends rule 3."
+
+This interface is used by the web interface for all operations, but you can also use it directly
+for testing.
+
+.. code-block:: bash
+
+    # Get current rule set
+    $ curl -s -X GET http://localhost:8053/daf | jq .
+    {}
+
+    # Create new rule
+    $ curl -s -X POST -d "src = 127.0.0.1 pass" http://localhost:8053/daf | jq .
+    {
+      "count": 0,
+      "active": true,
+      "info": "src = 127.0.0.1 pass",
+      "id": 1
+    }
+
+    # Disable rule
+    $ curl -s -X PATCH http://localhost:8053/daf/1/active/false | jq .
+    true
+
+    # Retrieve rule information
+    $ curl -s -X GET http://localhost:8053/daf/1 | jq .
+    {
+      "count": 4,
+      "active": true,
+      "info": "src = 127.0.0.1 pass",
+      "id": 1
+    }
+
+    # Delete a rule
+    $ curl -s -X DELETE http://localhost:8053/daf/1 | jq .
+    true
diff --git a/modules/daf/daf.lua b/modules/daf/daf.lua
index 4bb1207a366850a5101db21b6ab7d2917fa5e8df..a03d9b8616a9b69cb2f8bb7b7064e5be2a03d5a9 100644
--- a/modules/daf/daf.lua
+++ b/modules/daf/daf.lua
@@ -117,6 +117,11 @@ local function compile(query)
 	return parse_query(g)
 end
 
+-- @function Describe given rule for presentation
+local function rule_info(r)
+	return {info=r.info, id=r.rule.id, active=(r.rule.suspended ~= true), count=r.rule.count}
+end
+
 -- Module declaration
 local M = {
 	rules = {}
@@ -170,6 +175,15 @@ function M.del(id)
 	end
 end
 
+-- @function Find a rule
+function M.get(id)
+	for i, r in ipairs(M.rules) do
+		if r.rule.id == id then
+			return r
+		end
+	end
+end
+
 -- @function Enable/disable a rule
 function M.toggle(id, val)
 	for i, r in ipairs(M.rules) do
@@ -193,11 +207,21 @@ local function api(h, stream)
 	local m = h:get(':method')
 	-- GET method
 	if m == 'GET' then
-		local ret = {}
-		for _, r in ipairs(M.rules) do
-			table.insert(ret, {info=r.info, id=r.rule.id, active=(r.rule.suspended ~= true), count=r.rule.count})
+		local path = h:get(':path')
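+		-- A numeric trailing path segment selects a single rule; otherwise list all rules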
+		local id = tonumber(path:match '/([^/]*)$')
+		if id then
+			local r = M.get(id)
+			if r then
+				return rule_info(r)
+			end
+			return 404, '"No such rule"' -- Not found
+		else
+			local ret = {}
+			for _, r in ipairs(M.rules) do
+				table.insert(ret, rule_info(r))
+			end
+			return ret
 		end
-		return ret
 	-- DELETE method
 	elseif m == 'DELETE' then
 		local path = h:get(':path')
@@ -206,7 +230,7 @@ local function api(h, stream)
 			if M.del(id) then
 				return tojson(true)
 			end
-			return 404, 'No such rule' -- Not found
+			return 404, '"No such rule"' -- Not found
 		end
 		return 400 -- Request doesn't have numeric id
 	-- POST method
@@ -214,8 +238,8 @@ local function api(h, stream)
 		local query = stream:get_body_as_string()
 		if query then
 			local ok, r, err = pcall(M.add, query)
-			if not ok then return 500, r end
-			return {info=r.info, id=r.rule.id, active=(r.rule.suspended ~= true), count=r.rule.count}
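+			-- Quote the error message so the 500 response body is a valid JSON string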
+			if not ok then return 500, string.format('"%s"', r) end
+			return rule_info(r)
 		end
 		return 400
 	-- PATCH method
@@ -231,10 +255,10 @@ local function api(h, stream)
 			if M.toggle(id, val == 'true') then
 				return tojson(true)
 			else
-				return 404, 'No such rule'
+				return 404, '"No such rule"'
 			end
 		else
-			return 501, 'Action not implemented'
+			return 501, '"Action not implemented"'
 		end
 	end
 end
diff --git a/modules/policy/README.rst b/modules/policy/README.rst
index ed8229661811d60751292d0324980d666e25fd26..30fb72934b92abbaedac3ebf4b463efe656efb38 100644
--- a/modules/policy/README.rst
+++ b/modules/policy/README.rst
@@ -150,7 +150,6 @@ Properties
 
   :param action: the default action for match in the zone (e.g. RH-value `.`)
   :param path: path to zone file | database
-  :param format: set to `'lmdb'` for binary DB, currently NYI
   
   Enforce RPZ_ rules. This can be used in conjunction with published blocklist feeds.
   The RPZ_ operation is well described in this `Jan-Piet Mens's post`_,