
Fixing with last rules.

Johan Björklund 2024-11-21 12:43:54 +01:00
parent 9b0df6b2d1
commit d0a083b0b2
Signed by: bjorklund
GPG key ID: 5E8401339C7F5037
3 changed files with 721 additions and 141 deletions


@@ -0,0 +1,22 @@
[
{
"rulename": "ramnit",
"if": {
"malware.name": "ramnit-.*$"
},
"then": {
"classification.identifier": "ramnit"
}
},
{
"rulename": "default",
"if": {
"malware.name": ".*",
"classification.taxonomy": "malicious code",
"classification.identifier": ""
},
"then": {
"classification.identifier": "{msg[malware.name]}"
}
}
]
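
A minimal sketch of how these modify-expert rules are intended to behave, assuming full-match regex semantics on the "if" fields and str.format-style {msg[...]} substitution in the "then" values. The apply_rules helper and the sample event are hypothetical; this is not the IntelMQ modify expert itself:

import re

def apply_rules(event, rules):
    # Apply every rule whose "if" patterns all match the event; later
    # rules see fields set by earlier ones.
    for rule in rules:
        if all(re.fullmatch(pattern, str(event.get(field, "")))
               for field, pattern in rule["if"].items()):
            for field, value in rule["then"].items():
                # "{msg[malware.name]}" is filled in from the event itself
                event[field] = value.format(msg=event)

# rules = the parsed JSON list above
event = {"malware.name": "zeus", "classification.taxonomy": "malicious code",
         "classification.identifier": ""}
# apply_rules(event, rules) leaves classification.identifier == "zeus":
# the "ramnit" rule does not match, so the "default" rule copies malware.name.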


@@ -0,0 +1,550 @@
"""Sample notification rules for the Accessible-RDP feed
"""
import datetime
import json
import os
import traceback
import ipaddress
from collections import defaultdict
from datetime import timedelta
from psycopg2.extras import RealDictConnection
#from intelmq_certbund_contact.rulesupport import \
# Directive, most_specific_matches
from intelmq_certbund_contact.rulesupport import Directive, Match
from intelmqmail.db import open_db_connection
# squelch_time defines the amount of time to wait between "identical" events.
# It's a dictionary of dictionaries:
# First-level keys are recipient email addresses; 'default' is used if the email isn't listed.
# Second-level keys are feed names; 'default' covers feeds that aren't specified.
# Second-level values are datetime.timedelta() objects, so any of weeks=, days=, hours=, minutes=, seconds= works.
# The actual squelch time is the first defined value of email->feed, default->feed, email->default, default->default.
squelch_time = {
'default' : {
'default' : timedelta(days=7), # Use this for all feeds that aren't listed specifically
'Drone' : timedelta(days=2), #We should nag about malware infested systems. But let's give them time to fix.
'Sinkhole-HTTP-Drone' : timedelta(days=2), #We should nag about malware infested systems. But let's give them time to fix.
'Drone-Brute-Force' : timedelta(days=2), #We should nag about malware infested systems. But let's give them time to fix.
},
'abuse@example.domain' : {
'default' : timedelta(weeks=2), # Change the default value for non listed feeds
'Accessible-RDP' : timedelta(weeks=4) # Only report about RDP every 4th week
},
'fors@cert.sunet.se' : {
'default' : timedelta(days=8),
'Drone' : timedelta(days=1),
},
}
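# Example (the address above is illustrative): for email='abuse@example.domain'
# and feed='Drone', there is no email->feed entry, so default->feed applies and
# the squelch time is timedelta(days=2).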
# Create a base directive for a shadowserver feed
def shadowserver_csv_entry(basename):
return Directive(template_name="shadowserver_csv_" + basename,
notification_format="shadowserver",
event_data_format="csv_" + basename,
notification_interval=86400)
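# Example: shadowserver_csv_entry('Accessible-RDP') returns a Directive with
# template_name='shadowserver_csv_Accessible-RDP',
# event_data_format='csv_Accessible-RDP' and a notification_interval of
# 86400 seconds (one day).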
# Define feeds that we support
# '<feedname>' : {
# 'squelch_fields' : ('<field1>','<field2>',...),
# 'shadowserver_mapping' : shadowserver_csv_entry('<feedname>'),
# }
# feedname is the event's feed.name
# squelch_fields are the fields that, together with the feedname, make the event unique.
# They are used by the squelch function so we don't send out new alerts
# about an event that we already sent out an email for.
# Set to False if the feed shouldn't be squelched.
# shadowserver_mapping creates a base Directive for this feed
feeds = {
#'Accessible-ADB',
#'Accessible-AFP',
#'Accessible-ARD',
#'Accessible-CoAP',
#'Accessible-CWMP',
#'Accessible-Cisco-Smart-Install',
'Accessible-FTP' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Accessible-FTP'),
},
#'Accessible-HTTP',
#'Accessible-Hadoop',
'Accessible-RDP' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Accessible-RDP'),
},
#'Accessible-Rsync',
'Accessible-SMB' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Accessible-SMB'),
},
'Accessible-Telnet' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Accessible-Telnet'),
},
#'Accessible-Ubiquiti-Discovery-Service',
'Accessible-VNC' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Accessible-VNC'),
},
'Amplification-DDoS-Victim' : {
'squelch_fields' : (False),
'shadowserver_mapping' : shadowserver_csv_entry('Amplification-DDoS-Victim'),
},
'Blacklisted-IP' : {
'squelch_fields' : ('source.ip','extra.source'),
'shadowserver_mapping' : shadowserver_csv_entry('Block-Listed-IP'),
},
'Block-Listed-IP' : {
'squelch_fields' : ('source.ip','extra.source'),
'shadowserver_mapping' : shadowserver_csv_entry('Block-Listed-IP'),
},
#'Compromised-Website',
#'DNS-Open-Resolvers',
#'Darknet',
'Drone' : {
'squelch_fields' : ('source.ip', 'malware.name', 'extra.public_source'),
'shadowserver_mapping' : shadowserver_csv_entry('Drone'),
},
'Drone-Brute-Force' : {
'squelch_fields' : ('source.ip', 'protocol.application'),
'shadowserver_mapping' : shadowserver_csv_entry('Drone-Brute-Force'),
},
#'HTTP-Scanners',
#'ICS-Scanners',
#'IPv6-Sinkhole-HTTP-Drone',
'Microsoft-Sinkhole' : {
'squelch_fields' : ('source.ip', 'malware.name'),
'shadowserver_mapping' : shadowserver_csv_entry('Microsoft-Sinkhole'),
},
#'NTP-Monitor',
#'NTP-Version',
#'Open-Chargen',
#'Open-DB2-Discovery-Service',
'Open-Elasticsearch' : {
'squelch_fields' : ('source.ip', 'source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Open-Elasticsearch'),
},
#'Open-IPMI',
#'Open-IPP',
'Open-LDAP' : {
'squelch_fields' : ('source.ip', 'source.port', 'protocol.transport'),
'shadowserver_mapping' : shadowserver_csv_entry('Open-LDAP'),
},
'Open-LDAP-TCP' : {
'squelch_fields' : ('source.ip', 'source.port', 'protocol.transport'),
'shadowserver_mapping' : shadowserver_csv_entry('Open-LDAP-TCP'),
},
#'Open-MQTT',
#'Open-MSSQL',
'Open-Memcached' : {
'squelch_fields' : ('source.ip', 'source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Open-Memcached'),
},
'Open-MongoDB' : {
'squelch_fields' : ('source.ip', 'source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('Open-MongoDB'),
},
#'Open-NATPMP',
#'Open-NetBIOS-Nameservice',
#'Open-Netis',
#'Open-Portmapper',
#'Open-QOTD',
#'Open-Redis',
#'Open-SNMP',
#'Open-SSDP',
#'Open-TFTP',
#'Open-XDMCP',
#'Open-mDNS',
#'Outdated-DNSSEC-Key',
#'Outdated-DNSSEC-Key-IPv6',
#'SSL-FREAK-Vulnerable-Servers',
'SSL-POODLE-Vulnerable-Servers' : {
'squelch_fields' : ('source.ip','source.port'),
'shadowserver_mapping' : shadowserver_csv_entry('SSL-POODLE-Vulnerable-Servers'),
},
#'Sandbox-URL',
'Sinkhole-HTTP-Drone' : {
'squelch_fields' : ('source.ip', 'malware.name'),
'shadowserver_mapping' : shadowserver_csv_entry('Sinkhole-HTTP-Drone'),
},
#'Spam-URL',
#'Vulnerable-ISAKMP',
}
def is_subnet_of(a, b):
"""
Returns boolean: is `a` a subnet of `b`?
"""
a = ipaddress.ip_network(a)
b = ipaddress.ip_network(b)
a_len = a.prefixlen
b_len = b.prefixlen
return a_len >= b_len and a.supernet(a_len - b_len) == b
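# Example: is_subnet_of('192.0.2.0/25', '192.0.2.0/24') -> True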
def most_specific_matches(context):
"""Return the most specific matches from the context.
Which matches are considered more specific depends on which
attributes of the event matched and whether the matching entries in
the contact database are managed manually or automatically.
A manual match is always more specific than an automatic one.
The order from most to least specific is fqdn, ip, asn, geolocation.cc.
This function extends the one in rulesupport.py by returning only the
match with the smallest IP subnet when matching by IP.
Returns:
set of Match objects: The match objects which are considered
most specific
"""
by_field = defaultdict(lambda: {"manual": set(), "automatic": set()})
for match in context.matches:
context.logger.debug("Match field: %r, Managed by: %r", match.field,match.managed)
by_field[match.field][match.managed].add(match)
def get_preferred_by_field(field):
if field not in by_field:
return set()
elif field == "ip":
m=None # Use to store temporary matches
res=set() # Use to store resulting matches
by_managed = by_field[field]
context.logger.debug("Preferred by ip: %r", by_managed["manual"] or by_managed["automatic"])
# The logic here is:
# First run through all the manual matches and find the smallest subnet. Add all matches with this subnet.
# Then run through the automatic matches and see if there exists an even smaller subnet than we found
# in the manual matches. If there is a smaller subnet, replace the manual matches with the
# automatic matches that have this smaller subnet.
# We will not mix matches from manual and auto. If there are equal subnet matches we only use the manual
# ones to avoid having both a manual and an automatic match for the "same" organisation.
# I think it is highly unlikely that two different organisations will have the same subnet though and that
# the only time we have equal subnets is when we have both a manual and an automatic entry for the "same"
# organisation. This happens when we want to add inhibition and/or tags to the network.
for man_aut in (by_managed["manual"],by_managed["automatic"]):
for match in man_aut:
context.logger.debug("Match address is: %r",match.address)
if m is None: # No previous matches just add it to the resulting set
m = match
res.add(match)
else:
context.logger.debug("Previous best match: %r",m)
if match.address == m.address:
if match.managed == m.managed:
context.logger.debug("Adding match: %r",match)
res.add(match)
else:
context.logger.debug("Same subnet, but not same managed type: %r",match)
elif is_subnet_of(match.address,m.address):
# Smaller subnet found
context.logger.debug("This is a better match: %r",match)
m = match
res=set()
res.add(match)
return res
else:
by_managed = by_field[field]
return by_managed["manual"] or by_managed["automatic"]
return (get_preferred_by_field("fqdn") |
(get_preferred_by_field("ip") or get_preferred_by_field("asn")) |
get_preferred_by_field("geolocation.cc"))
def read_configuration():
"""Read configuration from system settings.
Return a dictionary or raise an error if no system config was found
"""
sys_conf_file = os.path.expanduser('/opt/intelmq/etc/intelmq-mailgen.conf')
if os.path.isfile(sys_conf_file):
with open(sys_conf_file) as conf_handle:
system_config = json.load(conf_handle)
else:
raise OSError("No configuration found.")
return system_config
def squelch(context,email=None,interval=None):
feedname = context.get('feed.name')
fields = feeds.get(feedname, {}).get('squelch_fields')
context.logger.debug("Email = %r",email)
if squelch_time.get(email) and interval is None:
interval = squelch_time.get(email).get(feedname)
if interval: context.logger.debug('Using email->feed squelch value = %r',interval)
if interval is None:
interval = squelch_time.get('default').get(feedname)
if interval: context.logger.debug('Using default->feed squelch value = %r',interval)
if squelch_time.get(email) and interval is None:
interval = squelch_time.get(email).get('default')
if interval: context.logger.debug('Using email->default squelch value = %r',interval)
if interval is None:
interval = squelch_time.get('default').get('default')
if interval: context.logger.debug('Using default->default squelch value = %r',interval)
if interval is None:
interval = datetime.timedelta(days=7)
context.logger.debug('Using built-in squelch value = %r',interval)
if not fields:
# If squelch_fields isn't defined or defined as (False) then no squelch
return False
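# Note: field values are interpolated directly into the SQL below;
# with untrusted event data a parameterized query would be safer.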
CLAUSE=f'e."feed.name" = \'{context.get("feed.name")}\' AND '
for field in fields:
if "extra." in field:
wherefield=f"e.extra ->> '{field[6:]}'"
else:
wherefield=f'e."{field}"'
CLAUSE += f'{wherefield} = \'{context.get(field)}\' AND '
CLAUSE = CLAUSE.removesuffix(' AND ') # rstrip(' AND ') would strip characters, not the suffix
context.logger.debug("CLAUSE = %s",CLAUSE)
QUERY_ACTIVE_DIRECTIVES = f"""
SELECT d.id
FROM directives AS d
JOIN events as e ON d.events_id = e.id
WHERE d.sent_id is null
AND {CLAUSE}
"""
QUERY_SENT_EVENTS = f"""
SELECT s."sent_at", e.id
FROM events AS e
JOIN directives AS d ON d.events_id = e.id
JOIN sent AS s ON s.id = d.sent_id
WHERE {CLAUSE}
ORDER BY s."sent_at" DESC
LIMIT 1;
"""
config = read_configuration()
now = datetime.datetime.now(datetime.timezone.utc)
cur = None
conn = open_db_connection(config, connection_factory=RealDictConnection)
try:
cur = conn.cursor()
cur.execute("SET TIME ZONE 'UTC';")
context.logger.debug("Fetching pending directives")
cur.execute(QUERY_ACTIVE_DIRECTIVES)
res = cur.fetchall()
if res:
context.logger.debug("Squelching due to active directives: %r",res)
return True
cur.execute(QUERY_SENT_EVENTS)
res = cur.fetchall()
context.logger.debug("Res is: %r",res)
if res:
last_sent = res[0]["sent_at"]
else:
last_sent = None
context.logger.debug("Last_sent: %s",last_sent)
return not (last_sent is None or (last_sent + interval < now))
except Exception as inst:
context.logger.debug("squelch() db-error")
context.logger.debug("Error: %r",inst)
context.logger.debug("Stack trace: %r",traceback.format_exc())
return False
finally:
context.logger.debug("Squelch finally clause")
if cur is not None: cur.close()
conn.close()
def determine_directives(context):
context.logger.debug("============= 49RDP.py ===========")
context.logger.debug("Context.event: %r",context._event)
feedname = context.get("feed.name")
context.logger.debug("Feedname : %r", feedname)
if not feeds.get(feedname):
context.logger.debug("This feed is not handled")
# This script shall only handle listed feeds
return
shadowserver_params = feeds.get(feedname,{}).get('shadowserver_mapping')
context.logger.debug("params : %r", shadowserver_params)
if shadowserver_params is None:
# This script will only handle Shadowserver feeds that we have a mapping for
return
context.logger.debug("Shadowserver_params: %r",shadowserver_params)
context.logger.debug("Context Matches: %r", context.matches)
contact_emails = []
for contact in context.all_contacts():
contact_emails.append(contact.email)
context.logger.debug("Context contacts: %r", contact_emails)
if context.section == "destination":
# We are not interested in notifying the Destination for this event.
return
else:
# Have a look at the bot's logging output. You may notice a
# difference (if you are using the 20prioritize_contacts.py
# script, there should not be one):
context.logger.debug("Source IP: %r", context.get("source.ip"))
context.logger.debug("Source port: %r", context.get("source.port"))
# The "Context Matches" line above logged all existing matches for
# the event, whilst this one logs only those matches which are
# considered "most specific":
context.logger.debug("Most Specific Matches: %r",
most_specific_matches(context))
# The source code of intelmq.bots.experts.certbund_contact.rulesupport
# can tell you more about how this is evaluated. In short: FQDN is more
# specific than IP, than ASN, than geolocation.cc (the latter indicating
# a national CERT). So we will use the output of the helper method
# most_specific_matches to continue:
msm = most_specific_matches(context)
# Now we need to determine who is going to be notified, and in which way.
# Remember, this has to be evaluated by mailgen; you need to create some
# configuration there, too!
for match in msm:
# Iterate the matches...
# Matches tell us the organisations and their contacts that
# could be determined for a property of the event, such as
# IP-Address, ASN, CC.
# Do not take care of automatic matches for the moment.
# TODO Check if you want to do this in production
# In our test case the script 06testbetrieb.py should make
# this piece of code unnecessary. But we want to be sure...
#-# if match.managed == "automatic":
#-# context.logger.debug("Skipping automatic match")
#-# continue
# Now get the annotations ("Tags") for the match
# Those annotations belong to the IP, ASN, FQDN or CC entry
match_annotations = match.annotations
# Most likely we are not going to need them in this script.
# For demonstration-purposes we are going to log them anyway:
context.logger.debug("Annotations for this Match %r",
match_annotations)
# Also organisations can carry these annotations ("Tags").
# We don't know them yet, as we'll need to iterate over the
# orgs to get them.
# Let's start actually doing things.
# I moved the decision-making to the function "evaluate_match".
# As we are in a Loop, this function is called for every match.
evaluate_match(context, match, shadowserver_params)
# After this function has run, there should be some directives
# in the context
context.logger.debug("Directives %r", context.directives)
# End Processing and do not evaluate other directive-scripts
return True
def evaluate_match(context, match, shadowserver_params):
# For demonstration purposes, log some of the information available
# for decisions here
context.logger.debug("Directives before evalute_match %r", context.directives)
# 1) If a match for a FQDN exists,
if match.field == "fqdn":
context.logger.debug("Specific FQDN-Match: %r", match)
# 2) If a match for an IP exists.
# If an IP match exists, the network's address is written into
# the match as "address"
if match.field == "ip":
context.logger.debug("Specific IP-Match: %r for Network %s",
match, match.address)
# 3) If a match for an ASN exists,
if match.field == "asn":
context.logger.debug("Specific ASN-Match: %r", match)
# 4) If a match for a CountryCode exists (indicating a national cert),
if match.field == "geolocation.cc":
context.logger.debug("Specific Geolocation-Match: %r", match)
# You could also check how the match was managed here:
# for instance: if match.managed == "automatic"
# Check if match annotations inhibit the directive
for annotation in match.annotations:
context.logger.debug("Match Tag: %r",annotation.tag)
context.logger.debug("Match Condition: %r",annotation.condition)
context.logger.debug("Match Condition evals: %r",annotation.matches(context))
if annotation.tag == "inhibition":
if annotation.matches(context):
context.logger.info("Inhibiting directives for %r",match)
return
# Let's have a look at the organisations associated with this match:
#context.logger.debug("Org: %r",org)
for org in context.organisations_for_match(match):
context.logger.debug("Org info: %r", org)
# Determine the Annotations for this Org.
org_annotations = org.annotations
context.logger.debug("Org Annotations: %r",org_annotations)
inhibit = False
for annotation in org_annotations:
context.logger.debug("Tag: %r",annotation.tag)
context.logger.debug("Condition: %r",annotation.condition)
context.logger.debug("Condition evals: %r",annotation.matches(context))
if annotation.tag == "inhibition":
if annotation.matches(context):
# Inhibit matches for this organisation
context.logger.info("Inhibiting directives for %r",org)
inhibit = True
continue
if inhibit:
continue
# Now create the Directives
#
# An organisation may have multiple contacts, so we need to
# iterate over them. In many cases this will only loop once as
# many organisations will have only one.
context.logger.debug("Org: %r",org)
for contact in org.contacts:
if contact.email_status != 'enabled':
context.logger.info("This contact is disabled: %r", contact)
continue
directive = Directive.from_contact(contact)
# Doing this defines "email" as the medium and uses the
# contact's email attribute as the recipient_address.
# One could also do this by hand; see Directive in
# intelmq.bots.experts.certbund_contact.rulesupport
# if you'd like to know more details.
# Now fill in more details of the directive, depending on
# the annotations of the directive and/or the type of the
# match
directive.update(shadowserver_params)
directive.aggregate_key["org_name"] = org.name
# directive.aggregate_by_field(context.section + ".asn")
# directive.aggregate_by_field("time.observation")
# aggregate_identifier = "ARRAY["
# for key in directive.aggregate_key:
# aggregate_identifier += "['" + key + "','" + directive.aggregate_key[key] + "']
# context.logger.debug("Aggregate key: %s(%s)",type(key),key)
# context.logger.debug("Aggregate keyval: %s(%s)",type(directive.aggregate_key[key]),directive.aggregate_key[key])
aggregate_identifier = directive.aggregate_key.copy()
for field in directive.aggregate_fields:
context.logger.debug("Aggregate field: %s(%s)",type(field),field)
context.logger.debug("Aggregate fieldvalue: %s(%s)",type(context.get(field)),context.get(field))
aggregate_identifier[field] = context.get(field)
context.logger.debug("Aggregate_identifier: %r",aggregate_identifier)
# Would like to have a default squelch time and a per-org squelch time.
# Right now just setting it here. (Note: this value is currently unused;
# squelch() is called without an interval and falls back to squelch_time.)
squelchtime = datetime.timedelta(days=7)
if not squelch(context,contact.email):
context.logger.debug("Adding directive: %r",directive)
context.add_directive(directive)
else:
context.logger.debug("Squelched directive")


@@ -7,182 +7,190 @@ class soc::intelmq(
String $apache_group = 'sunet-cert',
Boolean $use_shib = false,
) {
include sunet::systemd_reload
# Set some global variables
$api_user = lookup('intelmq_api_user.username', undef, undef, 'test')
$api_pass = lookup('intelmq_api_user.password', undef, undef, 'pass')
$db_user = lookup('intelmq_db_user.username', undef, undef, 'test')
$db_pass = lookup('intelmq_db_user.password', undef, undef, 'pass')
$privkey = lookup('gnupg.keyid', undef, undef, undef)
group { 'intelmq':
ensure => present,
}
user { 'intelmq':
ensure => present,
gid => 'intelmq',
groups => 'www-data',
home => '/opt/intelmq',
managehome => true,
shell => '/bin/bash',
}
file { '/etc/intelmq':
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
}
# Mailgen stuff
file { "All mailgen conf":
name => '/etc/intelmq/mailgen',
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
recurse => true,
replace => true,
source => "/var/cache/cosmos/model/overlay/etc/puppet/modules/soc/files/intelmq/mailgen"
}
file { '/etc/intelmq/api':
ensure => directory,
owner => 'intelmq',
group => 'www-data',
mode => '0770',
}
file { '/etc/intelmq/fody':
ensure => directory,
owner => 'intelmq',
group => 'www-data',
mode => '0770',
}
package { 'apache2':
ensure => 'latest',
}
package { 'libapache2-mod-wsgi-py3':
ensure => 'latest',
}
$intelmq_dirs = ['/opt/intelmq/install', '/opt/intelmq/var', '/opt/intelmq/var/lib', '/opt/intelmq/var/lib/bots', '/opt/intelmq/var/lib/bots/sieve', ]
$intelmq_dirs.each |String $intelmqdir| {
file { $intelmqdir:
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
mode => '0755',
}
}
file {
'/opt/intelmq/www':
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
mode => '0755',
;
'/opt/intelmq/www/intelmq-manager':
ensure => directory,
owner => 'intelmq',
group => 'www-data',
mode => '0750',
;
'/opt/intelmq/www/fody':
ensure => directory,
owner => 'intelmq',
group => 'www-data',
mode => '0750',
;
}
file { '/opt/intelmq/install/setup-nodesource.sh':
ensure => file,
content => file('soc/intelmq/setup-nodesource.sh'),
mode => '0540',
}
exec { 'Add nodesource repo':
command => '/opt/intelmq/install/setup-nodesource.sh',
creates => '/etc/apt/sources.list.d/nodesource.list',
}
package { ['postgresql', 'python3-venv', 'python3-pip', 'python3-gpg', 'python3-psycopg2', 'redict', 'nodejs', ]:
ensure => 'latest',
}
package { ['cmdtest', ]:
ensure => 'absent',
}
exec { 'Install yarn from npm':
command => 'npm install --global yarn',
creates => '/usr/bin/yarn',
}
exec { 'Install IntelMQ venv':
command => 'sudo -u intelmq /usr/bin/python3 -m venv --system-site-packages /opt/intelmq/venv',
creates => '/opt/intelmq/venv',
}
exec { 'Always active venv for IntelMQ':
command => 'echo ". venv/bin/activate" >> /opt/intelmq/.profile',
unless => 'grep -q activate /opt/intelmq/.profile 2> /dev/null',
}
file { '/opt/intelmq/install/eventdb-notifications.sql':
ensure => file,
content => file('soc/intelmq/eventdb-notifications.sql'),
}
file { '/opt/intelmq/install/install-intelmq.sh':
ensure => file,
content => file('soc/intelmq/install-intelmq.sh'),
mode => '0555',
}
exec { 'Install IntelMQ':
command => 'sudo -u intelmq /opt/intelmq/install/install-intelmq.sh',
creates => '/opt/intelmq/.installed',
require => File['/opt/intelmq/install/install-intelmq.sh'],
}
exec { 'Run IntelMQ setup script':
command => '/opt/intelmq/venv/bin/intelmqsetup',
creates => '/opt/intelmq/var/lib/state.json',
returns => ['0', '1',],
}
# System tools
file { '/usr/bin/intelmqctl':
ensure => file,
mode => '0555',
content => file('soc/intelmq/usr-bin-intelmqctl'),
}
file { '/opt/intelmq/install/setup-pgsql.sh':
ensure => file,
content => template('soc/intelmq/setup-pgsql.sh'),
mode => '0500',
}
exec { 'Setup IntelMQ eventdb':
command => '/opt/intelmq/install/setup-pgsql.sh',
creates => '/opt/intelmq/.pgsql-installed',
}
file { '/opt/intelmq/var/lib/bots/sieve':
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
recurse => true,
replace => true,
source => '/var/cache/cosmos/model/overlay/etc/puppet/modules/soc/files/intelmq/sieve'
}
file { '/opt/intelmq/var/lib/bots/notification_rules':
ensure => directory,
owner => 'intelmq',
group => 'intelmq',
recurse => true,
replace => true,
source => '/var/cache/cosmos/model/overlay/etc/puppet/modules/soc/files/intelmq/notification_rules'
}
file { '/etc/sudoers.d/01_intelmq-api':
ensure => file,
@@ -316,7 +324,7 @@ class soc::intelmq(
exec { 'Import private gpg key':
command => "sudo -u intelmq /usr/bin/gpg --import /opt/intelmq/.gnupg/${privkey}.asc",
unless => "sudo -u intelmq /usr/bin/gpg -K 2>/dev/null | grep ${privkey}",
require => File["/opt/intelmq/.gnupg/${privkey}.asc"].
require => File["/opt/intelmq/.gnupg/${privkey}.asc"],
}
}