diff --git a/doc/configuration.rst b/doc/configuration.rst
index 5cd4cd698b3cab1546d7de586c27d6c213aa1385..18ba3c825a88efec32ae5da8a2c3c86a82590dbc 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -677,6 +677,14 @@ to non-default values.
    The consequence is that new zone files might be discovered and reloaded,
    even for zones that do not relate to updated catalog zone.
 
+   Catalog zones never expire automatically, regardless of what is declared
+   in the catalog zone SOA record. However, a catalog zone can be expired
+   manually at any time using ``knotc -f zone-purge +expire``.
+
+   Currently, the expiration of a catalog zone has no effect on its
+   member zones. This behavior will likely change in the future, depending
+   on the evolution of the catalog zones Internet Draft.
+
 .. WARNING::
 
    The server does not work well if one member zone appears in two catalog zones
diff --git a/src/knot/events/handlers/refresh.c b/src/knot/events/handlers/refresh.c
index 3b5065adbc04570971b0b15e30dcd015a01e083e..e73adb310ee2da27e386c2f3447897fd4f7468fd 100644
--- a/src/knot/events/handlers/refresh.c
+++ b/src/knot/events/handlers/refresh.c
@@ -193,7 +193,7 @@ static void finalize_edns_expire(struct refresh_data *data)
 
 static void fill_expires_in(char *expires_in, size_t size, const struct refresh_data *data)
 {
-	if (!data->zone->is_catalog_flag) {
+	if (data->zone->timers.next_expire > 0) {
 		(void)snprintf(expires_in, size,
 		               ", expires in %u seconds", data->expire_timer);
 	}
@@ -1338,17 +1338,19 @@ int event_refresh(conf_t *conf, zone_t *zone)
 
 	if (ret == KNOT_EOK) {
 		assert(trctx.expire_timer != EXPIRE_TIMER_INVALID);
-		zone->timers.next_expire = now + trctx.expire_timer;
 		zone->timers.next_refresh = now + knot_soa_refresh(soa->rdata);
-		zone->timers.last_refresh_ok = true;
-
 		limit_next(conf, zone->name, C_REFRESH_MIN_INTERVAL,
 		           C_REFRESH_MAX_INTERVAL, now,
 		           &zone->timers.next_refresh);
-		if (trctx.expire_timer == knot_soa_expire(soa->rdata)) {
-			limit_next(conf, zone->name, C_EXPIRE_MIN_INTERVAL,
-			           C_EXPIRE_MAX_INTERVAL, now,
-			           &zone->timers.next_expire);
+		zone->timers.last_refresh_ok = true;
+
+		if (!zone->is_catalog_flag) {  /* For catz, keep next_expire. */
+			zone->timers.next_expire = now + trctx.expire_timer;
+			if (trctx.expire_timer == knot_soa_expire(soa->rdata)) {
+				limit_next(conf, zone->name, C_EXPIRE_MIN_INTERVAL,
+				           C_EXPIRE_MAX_INTERVAL, now,
+				           &zone->timers.next_expire);
+			}
 		}
 	} else {
 		time_t next;
diff --git a/src/knot/zone/timers.c b/src/knot/zone/timers.c
index 7210f35112d573ac2661c9e39989f7f52715eadc..32c22b32e023379e7f767ea008b8630ccf4dd2cb 100644
--- a/src/knot/zone/timers.c
+++ b/src/knot/zone/timers.c
@@ -165,6 +165,7 @@ int zone_timers_read(knot_lmdb_db_t *db, const knot_dname_t *zone,
 	knot_lmdb_abort(&txn);
 
 	// backward compatibility
+	// For catalog zones, next_expire is cleaned up later by zone_timers_sanitize().
 	if (timers->next_expire == 0 && timers->last_refresh > 0) {
 		timers->next_expire = timers->last_refresh + timers->soa_expire;
 	}
diff --git a/src/knot/zone/zone.c b/src/knot/zone/zone.c
index 18bd80ed99a3d4eebc9374b7b4abc0cdd76c0a34..b942eb206a9f205e3a59c5de852ef6e75d7b2add 100644
--- a/src/knot/zone/zone.c
+++ b/src/knot/zone/zone.c
@@ -594,6 +594,9 @@ void zone_timers_sanitize(conf_t *conf, zone_t *zone)
 	if (zone_is_slave(conf, zone)) {
 		// assume now if we don't know
 		time_set_default(&zone->timers.next_refresh, now);
+		if (zone->is_catalog_flag) {
+			zone->timers.next_expire = 0;
+		}
 	} else {
 		// invalidate if we don't have a master
 		zone->timers.last_refresh = 0;
diff --git a/tests-extra/tests/catalog/expire/data/catalog1.zone b/tests-extra/tests/catalog/expire/data/catalog1.zone
new file mode 100644
index 0000000000000000000000000000000000000000..e334ddbc8879b4bbe93a60d8746751cbc0d1229d
--- /dev/null
+++ b/tests-extra/tests/catalog/expire/data/catalog1.zone
@@ -0,0 +1,10 @@
+$ORIGIN catalog1.
+$TTL 0
+
+@ SOA ns admin 1 2 2 8 600
+  NS ns
+ns AAAA ::0
+version TXT "2"
+foo.zones PTR cataloged1.
+not.zones.in PTR not-cataloged1.
+zones PTR not-cataloged3.
diff --git a/tests-extra/tests/catalog/expire/data/cataloged1.zone b/tests-extra/tests/catalog/expire/data/cataloged1.zone
new file mode 100644
index 0000000000000000000000000000000000000000..0d563b2891bcd27d3c05371530dc1b9bd15e1517
--- /dev/null
+++ b/tests-extra/tests/catalog/expire/data/cataloged1.zone
@@ -0,0 +1,6 @@
+$ORIGIN cataloged1.
+$TTL 1200
+
+@ SOA ns admin 10001 5 5 15 600
+  NS ns
+ns AAAA ::0
diff --git a/tests-extra/tests/catalog/expire/test.py b/tests-extra/tests/catalog/expire/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fa5b87e7cffeabf087a71bc889d16974977cfdb
--- /dev/null
+++ b/tests-extra/tests/catalog/expire/test.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+
+'''Test of Interpreted Catalog zone (non-)expiration.'''
+
+from dnstest.test import Test
+
+import glob
+import os
+import shutil
+
+t = Test()
+
+master = t.server("knot")
+slave = t.server("knot")
+
+# Zones setup
+zone = t.zone("catalog1.", storage=".")
+members = t.zone("cataloged1.", storage=".")
+
+t.link(zone, master, slave, ixfr=True)
+
+master.cat_interpret(zone[0])
+slave.cat_interpret(zone[0])
+
+os.mkdir(master.dir + "/catalog")
+for zf in glob.glob(t.data_dir + "/*.zone"):
+    shutil.copy(zf, master.dir + "/catalog")
+
+t.start()
+
+slave.zones_wait(members)
+master.stop() # even regular answers must be blocked (to prevent refresh)
+
+# Check non-expiration of catalog.
+t.sleep(10)  # greater than the SOA expire
+resp = slave.dig("catalog1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="NOERROR")
+resp = slave.dig("cataloged1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="NOERROR")
+
+# Check regular expiration of member zones.
+t.sleep(7)  # together with the previous sleep, greater than the members' SOA expire
+resp = slave.dig("catalog1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="NOERROR")
+resp = slave.dig("cataloged1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="SERVFAIL")
+
+master.start()
+slave.zones_wait(members)
+
+# Check manual expiration of catalog.
+master.ctl("zone-purge -f +expire %s" % zone[0].name, wait=True)
+slave.ctl("zone-purge -f +expire %s" % zone[0].name, wait=True)
+resp = master.dig("catalog1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="SERVFAIL")
+resp = slave.dig("catalog1.", "SOA", udp=False, tsig=True)
+resp.check(rcode="SERVFAIL")
+# State of members after a catalog expire isn't standardised yet.
+# Add a check for it in the future.
+
+t.end()