Commit bd5dd725 authored by lukas kotyza

cleanup

just cleaning up code - debug exceptions, TODOs, prints ...
parent 650c5e7a
@@ -49,7 +49,7 @@ def main():
     archive_path = uci.get('pakon.archive.path') or '/srv/pakon/pakon-archive.db'
     _con = database.Database(archive_path)
     _con.attach_database("/var/lib/pakon.db", "live")
-    _start = 0#test reason - 3600*24 #move flows from live DB to archive after 24hours
+    _start = 3600*24 #move flows from live DB to archive after 24hours
     _now = int(time.mktime(datetime.datetime.now().timetuple()))
     # database connection, table name, details (dict(from:i, to:i+1)), list(grouper)
     live_alert_table = tables.Alert(_con, logging, "live.alerts", {"from":None, "to":0})
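A note on the restored constant: `time.mktime(datetime.datetime.now().timetuple())` converts a local-time struct to Unix seconds, so `_now` is effectively `int(time.time())`. A minimal sketch of the cutoff this enables (the consuming query sits outside this hunk, so its exact form is an assumption):

```python
import datetime
import time

_start = 3600 * 24  # move flows from live DB to archive after 24 hours
_now = int(time.mktime(datetime.datetime.now().timetuple()))  # ~ int(time.time())
cutoff = _now - _start  # flows that started before this are archive candidates
```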
@@ -66,14 +66,13 @@ def main():
     # all changes in the live database are done, back it up
     _con.dettach_database("live")
-    subprocess.call(["/usr/libexec/bckp_pakon/backup_sqlite.sh", "/var/lib/pakon.db", "/srv/pakon/pakon.db.xz"])
+    subprocess.call(["/usr/libexec/pakon-light/backup_sqlite.sh", "/var/lib/pakon.db", "/srv/pakon/pakon.db.xz"])
     archive_count = _con.select("select count(*) from traffic", None)[0][0]
     rowids_to_del = None
-    # TODO rewrite old method sel_sing_table to newer one (select)
     if archive_count > hard_limit:
         logging.warning('over {0} records in the archive database ({1}) -> deleting'.format(hard_limit, archive_count))
-        rowids_to_del = _con.select_single_table("traffic", "rowid", orderby="rowid desc limit -1 offset {0}".format(hard_limit))
+        rowids_to_del = _con.select("select rowid from traffic order by rowid desc limit -1 offset ?", (hard_limit, ))
     if rowids_to_del:
         _con.delete_in("traffic", "rowid", rowids_to_del)
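Besides resolving the TODO, the rewritten query switches from interpolating the offset with str.format to binding it as a parameter. A minimal sketch of the same pattern against the plain sqlite3 module, assuming the Database wrapper forwards to something equivalent:

```python
import sqlite3

con = sqlite3.connect("/srv/pakon/pakon-archive.db")
hard_limit = 100000  # hypothetical value; the real limit comes from configuration

# In SQLite, "limit -1" means no limit, so this skips the newest hard_limit
# rows (ordered by rowid desc) and returns every rowid beyond the cap.
# The "?" placeholder lets SQLite bind the value instead of splicing it
# into the SQL text.
rows = con.execute(
    "select rowid from traffic order by rowid desc limit -1 offset ?",
    (hard_limit,),
).fetchall()
rowids_to_del = [r[0] for r in rows]
```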
@@ -82,12 +81,15 @@ def main():
     flow_rules = load_archive_rules("flow")
     # if the rules changed (there is a detail level that can't be generated using the current rules),
     # reset everything to detail level 0 -> perform the whole archiving again
-    max_flow_level = _con.select("select max(details) from traffic", None)[0][0] or 0
-    if max_flow_level > len(flow_rules):
+    flow_lvl_highest = int(max(list(flow_rules.keys())))
+    max_flow_level = int(_con.select("select max(details) from traffic", None)[0][0] or 0)
+    if max_flow_level > flow_lvl_highest:
         logging.info("(flows):resetting all detail levels to 0")
         _con.update("update traffic set details = 0", None)
-    max_alert_level = _con.select("select max(details) from alerts", None)[0][0] or 0
-    if max_alert_level > len(alert_rules):
+    alert_lvl_highest = int(max(list(alert_rules.keys())))
+    max_alert_level = int(_con.select("select max(details) from alerts", None)[0][0] or 0)
+    if max_alert_level > alert_lvl_highest:
         logging.info("(alerts):resetting all detail levels to 0")
         _con.update("update alerts set details = 0", None)
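The new comparison uses the highest rule key rather than the rule count, which matters when the detail levels are not a dense 0..n sequence. A sketch under the assumption that load_archive_rules() returns a dict keyed by integer detail level:

```python
# Assumed shape of load_archive_rules("flow"): {detail_level: rule}
flow_rules = {1: {"window": 60}, 2: {"window": 900}, 4: {"window": 3600}}

flow_lvl_highest = int(max(list(flow_rules.keys())))  # 4
max_flow_level = 4  # e.g. result of "select max(details) from traffic"

# Old check: 4 > len(flow_rules) == 3 -> spurious full reset.
# New check: 4 > 4 is False, so the existing detail levels are kept.
needs_reset = max_flow_level > flow_lvl_highest
```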
@@ -23,29 +23,19 @@ class Table():
     def int_parse(self, *args):
         parsed = list()
         for arg in args:
-            try:
-                if arg:
-                    parsed.append(int(arg))
-                else:
-                    parsed.append(0)
-            except ValueError:
-                raise Exception("valer")
-            except TypeError:
-                raise Exception("typer")
+            if arg:
+                parsed.append(int(arg))
+            else:
+                parsed.append(0)
         return parsed

     def float_parse(self, *args):
         parsed = list()
         for arg in args:
-            try:
-                if arg:
-                    parsed.append(float(arg))
-                else:
-                    parsed.append(0)
-            except ValueError:
-                raise Exception("valer")
-            except TypeError:
-                raise Exception("typer")
+            if arg:
+                parsed.append(float(arg))
+            else:
+                parsed.append(0)
         return parsed

     def delete_archived(self):
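The dropped try/except blocks only re-raised ValueError/TypeError as the opaque Exception("valer")/Exception("typer"); after the cleanup the original exceptions propagate with their messages intact. Behaviour of the simplified helper, shown standalone:

```python
class Table:
    def int_parse(self, *args):
        parsed = list()
        for arg in args:
            if arg:
                parsed.append(int(arg))
            else:
                parsed.append(0)  # falsy inputs (None, '', 0) coerce to 0
        return parsed

Table().int_parse("42", None, "7")  # -> [42, 0, 7]
Table().int_parse("abc")            # ValueError naming the bad literal,
                                    # instead of the old Exception("valer")
```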
@@ -161,8 +151,9 @@ class Flow(Table):
             send, recv, window = self.int_parse(row['bytes_send'],
                                                 row['bytes_received'],
                                                 rule['window'])
-            cur_record = Record(start, end, _grouper, row['src_ip'], row['src_port'], row['dest_ip']
-                                , row['dest_port'], row['proto'], row['app_proto'], send, recv)
+            cur_record = Record(start, end, dict(_grouper), row['src_ip'], row['src_port'],
+                                row['dest_ip'], row['dest_port'], row['proto'],
+                                row['app_proto'], send, recv)
             if not prev_record:
                 prev_record = cur_record
             elif prev_record.get('dest_port') == row['dest_port'] and prev_record.get('start') + prev_record.get('duration') + window > cur_record.get('start'):
@@ -174,7 +165,7 @@ class Flow(Table):
             self.to_ins.append(prev_record)

     def merge(self, prev, cur):
-        """merge two records
+        """merge two flows
         """
         if cur.get('start') + cur.get('duration') > prev.get('start') + prev.get('duration'):
             prev.dur_plus(cur.get('duration'))
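For context on the merge path: two consecutive rows collapse into one record when they share dest_port and the previous record's end, padded by the rule's window, overlaps the next record's start. A sketch of that condition with Record reduced to a plain dict (the real Record class is outside this diff):

```python
prev = {"dest_port": 443, "start": 1000, "duration": 30}
cur = {"dest_port": 443, "start": 1050, "duration": 10}
window = 60  # rule['window'], in seconds

# 1000 + 30 + 60 > 1050 -> True: the gap fits inside the window, so the
# rows would be merged rather than archived separately.
should_merge = (
    prev["dest_port"] == cur["dest_port"]
    and prev["start"] + prev["duration"] + window > cur["start"]
)
```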
@@ -228,17 +219,17 @@ class Alert(Table):
                 "start":_start
             }
             if self.details['from'] is not None:
-                sql = str("select rowid, * from {0}"#alerts
+                sql = str("select rowid, * from alerts"
                          " where sid = :sid and signature = :sig"
-                          " {1} {2}"
+                          " {0} {1}"
                          " and details = :det and start < :start order by dest_port, start")
             else:
-                sql = str("select rowid, * from {0}"#live.alerts
+                sql = str("select rowid, * from live.alerts"
                          " where sid = :sid and signature = :sig"
-                          " {1} {2}"
+                          " {0} {1}"
                          " and start < :start order by dest_port, start")
             sql, data_bind = self.sev_cat_handle(rule['severity'], rule['category'], sql, data_bind)
-            results = self.database.select(sql.format(self.table), (data_bind))
+            results = self.database.select(sql, (data_bind))
             if not results:
                 self.logging.warning("a rule ({0}) has no matches in database".format(rule))
                 return
@@ -325,5 +316,5 @@ class Alert(Table):
         else:
             cat = " and category = :cat"
             data_bind['cat'] = category
-        sql = sql.format(self.table, sev, cat)
+        sql = sql.format(sev, cat)
         return (sql, data_bind)
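With the table name written into the SQL literally, sev_cat_handle fills only two positional slots, hence the renumbering from {1} {2} to {0} {1} and the dropped sql.format(self.table, ...). A minimal illustration of the str.format mechanics (the :named tokens are SQLite bind parameters and are untouched by format):

```python
sql = ("select rowid, * from alerts"
       " where sid = :sid and signature = :sig"
       " {0} {1}"
       " and details = :det and start < :start order by dest_port, start")

sev = " and severity = :sev"   # fragments as built by sev_cat_handle
cat = " and category = :cat"

# Before the change this was sql.format(self.table, sev, cat), with {0}
# reserved for the table name; inlining the table leaves two slots.
print(sql.format(sev, cat))
```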
config monitor 'monitor'
	option notify_new_devices 0
	list interface 'br-lan'
	list interface 'br-guest_turris'

config archive 'archive'
	option keep 4w

config archive_rule
	option up_to 1d
	option window 60
	option size_threshold 4096

config archive_rule
	option up_to 3d
	option window 900
	option size_threshold 8192

config archive_rule
	option up_to 7d
	option window 1800

config archive_rule
	option up_to 14d
	option window 3600
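A reading of the defaults above, hedged since the parser is not part of this commit: each archive_rule appears to define one detail level, where flows older than up_to are merged within a window of seconds and size_threshold (where present) likely filters out small flows. One possible in-memory shape, matching the dict-of-levels assumption used earlier:

```python
# Hypothetical result of parsing the four archive_rule sections
# (detail-level keys and day-to-second conversions are assumptions):
flow_rules = {
    1: {"up_to": 1 * 86400,  "window": 60,   "size_threshold": 4096},
    2: {"up_to": 3 * 86400,  "window": 900,  "size_threshold": 8192},
    3: {"up_to": 7 * 86400,  "window": 1800},
    4: {"up_to": 14 * 86400, "window": 3600},
}
# Older flows fall under coarser windows and are merged more aggressively;
# "option keep 4w" caps total retention at four weeks.
```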