From 7ef3185698a789c2b246e16346612612dbc8aa60 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 11 Jul 2024 11:51:44 -0400 Subject: [PATCH 01/21] Add tenable_io --- .../scripts/supported-integrations.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/so-elastic-fleet-package-registry/scripts/supported-integrations.txt b/so-elastic-fleet-package-registry/scripts/supported-integrations.txt index 1e048a0..319fbee 100644 --- a/so-elastic-fleet-package-registry/scripts/supported-integrations.txt +++ b/so-elastic-fleet-package-registry/scripts/supported-integrations.txt @@ -65,6 +65,7 @@ sophos_central- symantec_endpoint- system- tcp- +tenable_io- tenable_sc- ti_abusech- ti_anomali- From 9ddfd026d3c21f0c0f2d18933d2218d7017dd0f4 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 15 Jul 2024 19:19:25 -0400 Subject: [PATCH 02/21] Update to 2.4/dev --- so-steno/Dockerfile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/so-steno/Dockerfile b/so-steno/Dockerfile index 91998a2..c5d3bc7 100644 --- a/so-steno/Dockerfile +++ b/so-steno/Dockerfile @@ -13,18 +13,19 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -FROM ghcr.io/security-onion-solutions/centos:7 +FROM ghcr.io/security-onion-solutions/oraclelinux:9 -LABEL maintainer "Security Onion Solutions, LLC" +LABEL maintainer="Security Onion Solutions, LLC" LABEL description="Google Stenographer running in a docker for use with Security Onion." # Common CentOS layer RUN yum -y install epel-release bash libpcap iproute && \ - yum -y install https://repo.ius.io/ius-release-el7.rpm && \ - yum -y install snappy leveldb tcpdump jq libaio libseccomp golang which openssl python36u python36u-pip && \ - /usr/bin/pip3.6 install && \ + yum -y install snappy leveldb tcpdump jq libaio libseccomp golang which openssl && \ yum -y erase epel-release && yum clean all && rm -rf /var/cache/yum && \ - rpm -i https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/Stenoupgrade/stenographer-0-1.20200922gite8db1ee.el7.x86_64.rpm && \ + groupadd -g 941 stenographer && \ + useradd stenographer -u 941 -g 941 && \ + rpm -i https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/stenographer-v101/securityonion-stenographer-v1.0.1.0.rpm && \ + chmod 755 /usr/bin/steno* && \ setcap 'CAP_NET_RAW+ep CAP_NET_ADMIN+ep CAP_IPC_LOCK+ep CAP_SETGID+ep' /usr/bin/stenotype && \ mkdir -p /nsm/pcap/files && \ mkdir -p /nsm/pcap/index && \ From 55373283c8bc4532cbd4dc80159b7a8eecca7583 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 15 Jul 2024 19:20:23 -0400 Subject: [PATCH 03/21] Update to 2.4/dev --- so-tcpreplay/Dockerfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index 0ec08a9..a46598e 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2014-2023 Security Onion Solutions, LLC +# Copyright Security Onion Solutions, LLC # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -13,9 +13,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-FROM ghcr.io/security-onion-solutions/centos:7 +FROM ghcr.io/security-onion-solutions/oraclelinux:9 -LABEL maintainer "Security Onion Solutions, LLC" +LABEL maintainer="Security Onion Solutions, LLC" LABEL description="Replay PCAPs to sniffing interface(s)" # Copy over tcpreplay - using v4.2.6 instead of 4.3.x because of known bugs: https://github.com/appneta/tcpreplay/issues/557 @@ -23,12 +23,12 @@ COPY files/tcpreplay /usr/local/bin/tcpreplay # Setup our utilities, download the pcap samples, convert them to RPM and install them RUN yum update -y && \ - yum clean all && \ + yum clean all && dnf config-manager --enable ol9_codeready_builder && dnf -y install oraclelinux-developer-release-el9 && dnf repolist && \ yum -y install epel-release && \ yum -y install libpcap && \ yum -y install rpmrebuild && \ yum -y install alien && \ - yum -y install wget && \ + yum -y install wget libnsl && \ \ for i in securityonion-samples_20121202-0ubuntu0securityonion4_all.deb securityonion-samples-bro_20170824-1ubuntu1securityonion3_all.deb securityonion-samples-markofu_20130522-0ubuntu0securityonion3_all.deb securityonion-samples-mta_20190514-1ubuntu1securityonion1_all.deb securityonion-samples-shellshock_20140926-0ubuntu0securityonion2_all.deb; do wget https://launchpad.net/~securityonion/+archive/ubuntu/stable/+files/$i; done && \ \ From b1d92524975358c5aae64901a4db8796e2b52de1 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 30 Jul 2024 09:58:41 -0400 Subject: [PATCH 04/21] Add bash --- so-elastic-fleet-package-registry/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/so-elastic-fleet-package-registry/Dockerfile b/so-elastic-fleet-package-registry/Dockerfile index a6bd3da..866cdfb 100644 --- a/so-elastic-fleet-package-registry/Dockerfile +++ b/so-elastic-fleet-package-registry/Dockerfile @@ -16,6 +16,9 @@ ARG VERSION FROM docker.elastic.co/package-registry/distribution:$VERSION as original_image +# Add bash because it is not included in the new default base image of wolfi-base (default sh shell) +RUN apk add --no-cache bash coreutils + # Remove unsupported packages COPY scripts /scripts RUN chmod +x /scripts/supported-integrations.sh && bash /scripts/supported-integrations.sh && rm -rf /scripts From 3b7da7eca61a6b9cf28ec19b3b30ecf9ee7cb722 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 13 Aug 2024 07:16:34 -0400 Subject: [PATCH 05/21] remove unused images --- so-curator/Dockerfile | 42 - so-curator/entrypoint.sh | 3 - so-curator/files/actions.py | 2509 ----------------- so-curator/files/curator.repo | 6 - so-curator/files/settings.py | 160 -- so-mysql/Dockerfile | 47 - so-mysql/docker-entrypoint.sh | 214 -- so-mysql/healthcheck.sh | 24 - so-playbook/Dockerfile | 26 - so-playbook/playbook/circle_theme.tar.bz2 | Bin 19533 -> 0 bytes .../passenger-nginx-config-template.erb | 45 - so-soctopus/.gitignore | 2 - so-soctopus/Dockerfile | 40 - so-soctopus/elasticsearch.py | 1879 ------------ so-soctopus/so-soctopus/SOCtopus.conf | 67 - so-soctopus/so-soctopus/SOCtopus.py | 132 - so-soctopus/so-soctopus/config.py | 8 - so-soctopus/so-soctopus/destinations.py | 772 ----- so-soctopus/so-soctopus/forms.py | 8 - so-soctopus/so-soctopus/grr.py | 45 - so-soctopus/so-soctopus/helpers.py | 52 - so-soctopus/so-soctopus/playbook.py | 707 ----- .../playbook/securityonion-baseline.yml | 690 ----- .../playbook/securityonion-network.yml | 26 - so-soctopus/so-soctopus/playbook/sysmon.yml | 78 - .../so-soctopus/playbook_bulk-update.py | 196 -- .../playbook_elastalert_config.yaml | 53 - 
so-soctopus/so-soctopus/playbook_play-sync.py | 98 - .../so-soctopus/playbook_play-update.py | 41 - so-soctopus/so-soctopus/requirements.txt | 13 - so-soctopus/so-soctopus/templates/cancel.html | 11 - so-soctopus/so-soctopus/templates/hive.html | 21 - .../so-soctopus/templates/postresult.html | 16 - so-soctopus/so-soctopus/templates/result.html | 15 - .../so-soctopus/templates/strelka.html | 14 - .../so-soctopus/templates/update_event.html | 18 - so-soctopus/so-soctopus/wsgi.py | 4 - 37 files changed, 8082 deletions(-) delete mode 100644 so-curator/Dockerfile delete mode 100644 so-curator/entrypoint.sh delete mode 100644 so-curator/files/actions.py delete mode 100644 so-curator/files/curator.repo delete mode 100644 so-curator/files/settings.py delete mode 100644 so-mysql/Dockerfile delete mode 100644 so-mysql/docker-entrypoint.sh delete mode 100644 so-mysql/healthcheck.sh delete mode 100644 so-playbook/Dockerfile delete mode 100644 so-playbook/playbook/circle_theme.tar.bz2 delete mode 100644 so-playbook/playbook/passenger-nginx-config-template.erb delete mode 100644 so-soctopus/.gitignore delete mode 100644 so-soctopus/Dockerfile delete mode 100644 so-soctopus/elasticsearch.py delete mode 100644 so-soctopus/so-soctopus/SOCtopus.conf delete mode 100755 so-soctopus/so-soctopus/SOCtopus.py delete mode 100644 so-soctopus/so-soctopus/config.py delete mode 100644 so-soctopus/so-soctopus/destinations.py delete mode 100644 so-soctopus/so-soctopus/forms.py delete mode 100644 so-soctopus/so-soctopus/grr.py delete mode 100644 so-soctopus/so-soctopus/helpers.py delete mode 100644 so-soctopus/so-soctopus/playbook.py delete mode 100644 so-soctopus/so-soctopus/playbook/securityonion-baseline.yml delete mode 100644 so-soctopus/so-soctopus/playbook/securityonion-network.yml delete mode 100644 so-soctopus/so-soctopus/playbook/sysmon.yml delete mode 100644 so-soctopus/so-soctopus/playbook_bulk-update.py delete mode 100644 so-soctopus/so-soctopus/playbook_elastalert_config.yaml delete mode 100644 so-soctopus/so-soctopus/playbook_play-sync.py delete mode 100644 so-soctopus/so-soctopus/playbook_play-update.py delete mode 100644 so-soctopus/so-soctopus/requirements.txt delete mode 100644 so-soctopus/so-soctopus/templates/cancel.html delete mode 100644 so-soctopus/so-soctopus/templates/hive.html delete mode 100644 so-soctopus/so-soctopus/templates/postresult.html delete mode 100644 so-soctopus/so-soctopus/templates/result.html delete mode 100644 so-soctopus/so-soctopus/templates/strelka.html delete mode 100644 so-soctopus/so-soctopus/templates/update_event.html delete mode 100644 so-soctopus/so-soctopus/wsgi.py diff --git a/so-curator/Dockerfile b/so-curator/Dockerfile deleted file mode 100644 index 1336523..0000000 --- a/so-curator/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright Security Onion Solutions, LLC - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -FROM alpine:3 - -LABEL maintainer "Security Onion Solutions, LLC" - -ARG GID=934 -ARG UID=934 -ARG USERNAME=curator - -ENV LC_ALL=en_US.UTF-8 - -USER root - -RUN apk --no-cache add python3 py-setuptools py-pip gcc libffi py-cffi python3-dev libffi-dev py-openssl musl-dev linux-headers openssl-dev && \ - pip install elasticsearch-curator && \ - apk del gcc python3-dev libffi-dev musl-dev linux-headers openssl-dev - -RUN addgroup -g ${GID} ${USERNAME} && \ - adduser -u ${UID} -G ${USERNAME} -D -H ${USERNAME} - -COPY ../files/actions.py /usr/lib/python3.10/site-packages/curator/actions.py -COPY ../files/settings.py /usr/lib/python3.10/site-packages/curator/defaults/settings.py -COPY entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh - -USER curator - -ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/so-curator/entrypoint.sh b/so-curator/entrypoint.sh deleted file mode 100644 index bdb7f20..0000000 --- a/so-curator/entrypoint.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -while true; do sleep 1; done \ No newline at end of file diff --git a/so-curator/files/actions.py b/so-curator/files/actions.py deleted file mode 100644 index e022de3..0000000 --- a/so-curator/files/actions.py +++ /dev/null @@ -1,2509 +0,0 @@ -"""Curator Actions""" -import logging -import re -import time -from copy import deepcopy -from datetime import datetime -from elasticsearch.exceptions import ConflictError, RequestError -from curator import exceptions, utils - -class Alias(object): - """Alias Action Class""" - def __init__(self, name=None, extra_settings={}, **kwargs): - """ - Define the Alias object. - - :arg name: The alias name - :arg extra_settings: Extra settings, including filters and routing. For - more information see - https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html - :type extra_settings: dict, representing the settings. - """ - if not name: - raise exceptions.MissingArgument('No value for "name" provided.') - #: Instance variable - #: The strftime parsed version of `name`. - self.name = utils.parse_date_pattern(name) - #: The list of actions to perform. Populated by - #: :mod:`curator.actions.Alias.add` and - #: :mod:`curator.actions.Alias.remove` - self.actions = [] - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = None - #: Instance variable. - #: Any extra things to add to the alias, like filters, or routing. - self.extra_settings = extra_settings - self.loggit = logging.getLogger('curator.actions.alias') - #: Instance variable. - #: Preset default value to `False`. - self.warn_if_no_indices = False - - def add(self, ilo, warn_if_no_indices=False): - """ - Create `add` statements for each index in `ilo` for `alias`, then - append them to `actions`. Add any `extras` that may be there. - - :arg ilo: A :class:`curator.indexlist.IndexList` object - - """ - utils.verify_index_list(ilo) - if not self.client: - self.client = ilo.client - self.name = utils.parse_datemath(self.client, self.name) - try: - ilo.empty_list_check() - except exceptions.NoIndices: - # Add a warning if there are no indices to add, if so set in options - if warn_if_no_indices: - self.warn_if_no_indices = True - self.loggit.warn( - 'No indices found after processing filters. 
' - 'Nothing to add to {0}'.format(self.name) - ) - return - else: - # Re-raise the exceptions.NoIndices so it will behave as before - raise exceptions.NoIndices('No indices to add to alias') - for index in ilo.working_list(): - self.loggit.debug( - 'Adding index {0} to alias {1} with extra settings ' - '{2}'.format(index, self.name, self.extra_settings) - ) - add_dict = {'add' : {'index' : index, 'alias': self.name}} - add_dict['add'].update(self.extra_settings) - self.actions.append(add_dict) - - def remove(self, ilo, warn_if_no_indices=False): - """ - Create `remove` statements for each index in `ilo` for `alias`, - then append them to `actions`. - - :arg ilo: A :class:`curator.indexlist.IndexList` object - """ - utils.verify_index_list(ilo) - if not self.client: - self.client = ilo.client - self.name = utils.parse_datemath(self.client, self.name) - try: - ilo.empty_list_check() - except exceptions.NoIndices: - # Add a warning if there are no indices to add, if so set in options - if warn_if_no_indices: - self.warn_if_no_indices = True - self.loggit.warn( - 'No indices found after processing filters. ' - 'Nothing to remove from {0}'.format(self.name) - ) - return - else: - # Re-raise the exceptions.NoIndices so it will behave as before - raise exceptions.NoIndices('No indices to remove from alias') - aliases = self.client.indices.get_alias() - for index in ilo.working_list(): - if index in aliases: - self.loggit.debug( - 'Index {0} in get_aliases output'.format(index)) - # Only remove if the index is associated with the alias - if self.name in aliases[index]['aliases']: - self.loggit.debug( - 'Removing index {0} from alias ' - '{1}'.format(index, self.name) - ) - self.actions.append( - {'remove' : {'index' : index, 'alias': self.name}}) - else: - self.loggit.debug( - 'Can not remove: Index {0} is not associated with alias' - ' {1}'.format(index, self.name) - ) - - def body(self): - """ - Return a `body` string suitable for use with the `update_aliases` API - call. - """ - if not self.actions: - if not self.warn_if_no_indices: - raise exceptions.ActionError('No "add" or "remove" operations') - else: - raise exceptions.NoIndices('No "adds" or "removes" found. Taking no action') - self.loggit.debug('Alias actions: {0}'.format(self.actions)) - - return {'actions' : self.actions} - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - for item in self.body()['actions']: - job = list(item.keys())[0] - index = item[job]['index'] - alias = item[job]['alias'] - # We want our log to look clever, so if job is "remove", strip the - # 'e' so "remove" can become "removing". "adding" works already. - self.loggit.info( - 'DRY-RUN: alias: {0}ing index "{1}" {2} alias ' - '"{3}"'.format( - job.rstrip('e'), - index, - 'to' if job is 'add' else 'from', - alias - ) - ) - - def do_action(self): - """ - Run the API call `update_aliases` with the results of `body()` - """ - self.loggit.info('Updating aliases...') - self.loggit.info('Alias actions: {0}'.format(self.body())) - try: - self.client.indices.update_aliases(body=self.body()) - except Exception as err: - utils.report_failure(err) - -class Allocation(object): - """Allocation Action Class""" - def __init__( - self, ilo, key=None, value=None, allocation_type='require', wait_for_completion=False, - wait_interval=3, max_wait=-1 - ): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg key: An arbitrary metadata attribute key. 
Must match the key - assigned to at least some of your nodes to have any effect. - :arg value: An arbitrary metadata attribute value. Must correspond to - values associated with `key` assigned to at least some of your nodes - to have any effect. If a `None` value is provided, it will remove - any setting associated with that `key`. - :arg allocation_type: Type of allocation to apply. Default is `require` - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `False`) - :type wait_for_completion: bool - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - - .. note:: - See: - https://www.elastic.co/guide/en/elasticsearch/reference/current/shard-allocation-filtering.html - """ - utils.verify_index_list(ilo) - if not key: - raise exceptions.MissingArgument('No value for "key" provided') - if allocation_type not in ['require', 'include', 'exclude']: - raise ValueError( - '{0} is an invalid allocation_type. Must be one of "require", ' - '"include", "exclude".'.format(allocation_type) - ) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - self.loggit = logging.getLogger('curator.actions.allocation') - #: Instance variable. - #: Populated at instance creation time. Value is - #: ``index.routing.allocation.`` `allocation_type` ``.`` `key` ``.`` `value` - bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key) - self.body = {bkey : value} - #: Instance variable. - #: Internal reference to `wait_for_completion` - self.wfc = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run(self.index_list, 'allocation', body=self.body) - - def do_action(self): - """ - Change allocation settings for indices in `index_list.indices` with the - settings in `body`. - """ - self.loggit.debug( - 'Cannot get change shard routing allocation of closed indices. ' - 'Omitting any closed indices.' - ) - self.index_list.filter_closed() - self.index_list.empty_list_check() - self.loggit.info( - 'Updating {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - self.loggit.info('Updating index setting {0}'.format(self.body)) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.client.indices.put_settings( - index=utils.to_csv(lst), body=self.body - ) - if self.wfc: - self.loggit.debug( - 'Waiting for shards to complete relocation for indices:' - ' {0}'.format(utils.to_csv(lst)) - ) - utils.wait_for_it( - self.client, 'allocation', - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - except Exception as err: - utils.report_failure(err) - - -class Close(object): - """Close Action Class""" - def __init__(self, ilo, delete_aliases=False, skip_flush=False, ignore_sync_failures=False): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg delete_aliases: If `True`, will delete any associated aliases - before closing indices. 
- :type delete_aliases: bool - :arg skip_flush: If `True`, will not flush indices before closing. - :type skip_flush: bool - :arg ignore_sync_failures: If `True`, will not fail if there are failures while attempting - a synced flush. - :type ignore_sync_failures: bool - """ - utils.verify_index_list(ilo) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internal reference to `delete_aliases` - self.delete_aliases = delete_aliases - #: Instance variable. - #: Internal reference to `skip_flush` - self.skip_flush = skip_flush - #: Instance variable. - #: Internal reference to `ignore_sync_failures` - self.ignore_sync_failures = ignore_sync_failures - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - self.loggit = logging.getLogger('curator.actions.close') - - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run( - self.index_list, 'close', **{'delete_aliases':self.delete_aliases}) - - def do_action(self): - """ - Close open indices in `index_list.indices` - """ - self.index_list.filter_closed() - self.index_list.empty_list_check() - self.loggit.info( - 'Closing {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - lst_as_csv = utils.to_csv(lst) - self.loggit.debug('CSV list of indices to close: {0}'.format(lst_as_csv)) - if self.delete_aliases: - self.loggit.info('Deleting aliases from indices before closing.') - self.loggit.debug('Deleting aliases from: {0}'.format(lst)) - try: - self.client.indices.delete_alias(index=lst_as_csv, name='_all') - self.loggit.debug('Deleted aliases from: {0}'.format(lst)) - except Exception as err: - self.loggit.warn( - 'Some indices may not have had aliases. Exception:' - ' {0}'.format(err) - ) - if not self.skip_flush: - try: - self.client.indices.flush(index=lst_as_csv, ignore_unavailable=True) - except ConflictError as err: - if not self.ignore_sync_failures: - raise ConflictError(err.status_code, err.error, err.info) - else: - self.loggit.warn( - 'Ignoring flushed sync failures: ' - '{0} {1}'.format(err.error, err.info) - ) - self.client.indices.close(index=lst_as_csv, ignore_unavailable=True) - except Exception as err: - utils.report_failure(err) - -class Freeze(object): - """Freeze Action Class""" - def __init__(self, ilo): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - """ - utils.verify_index_list(ilo) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - self.loggit = logging.getLogger('curator.actions.freeze') - - - def do_dry_run(self): - """ - Log what the output would be, but take no action. 
- """ - utils.show_dry_run( - self.index_list, 'freeze') - - def do_action(self): - """ - Freeze indices in `index_list.indices` - """ - #self.index_list.filter_frozen() - self.index_list.empty_list_check() - self.loggit.info( - 'Freezing {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.client.xpack.indices.freeze( - index=utils.to_csv(lst)) - except Exception as err: - utils.report_failure(err) - - -class Unfreeze(object): - """Unfreeze Action Class""" - def __init__(self, ilo): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - """ - utils.verify_index_list(ilo) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - self.loggit = logging.getLogger('curator.actions.unfreeze') - - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run( - self.index_list, 'unfreeze') - - def do_action(self): - """ - Unfreeze indices in `index_list.indices` - """ - self.index_list.empty_list_check() - self.loggit.info( - 'Unfreezing {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.client.xpack.indices.unfreeze( - index=utils.to_csv(lst)) - except Exception as err: - utils.report_failure(err) - - -class ClusterRouting(object): - """ClusterRouting Action Class""" - def __init__( - self, client, routing_type=None, setting=None, value=None, wait_for_completion=False, - wait_interval=9, max_wait=-1 - ): - """ - For now, the cluster routing settings are hardcoded to be ``transient`` - - :arg client: An :class:`elasticsearch.Elasticsearch` client object - :arg routing_type: Type of routing to apply. Either `allocation` or - `rebalance` - :arg setting: Currently, the only acceptable value for `setting` is - ``enable``. This is here in case that changes. - :arg value: Used only if `setting` is `enable`. Semi-dependent on - `routing_type`. Acceptable values for `allocation` and `rebalance` - are ``all``, ``primaries``, and ``none`` (string, not `NoneType`). - If `routing_type` is `allocation`, this can also be - ``new_primaries``, and if `rebalance`, it can be ``replicas``. - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `False`) - :type wait_for_completion: bool - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - """ - utils.verify_client_object(client) - #: Instance variable. - #: An :class:`elasticsearch.Elasticsearch` client object - self.client = client - self.loggit = logging.getLogger('curator.actions.cluster_routing') - #: Instance variable. - #: Internal reference to `wait_for_completion` - self.wfc = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. 
- self.max_wait = max_wait - - if setting != 'enable': - raise ValueError( - 'Invalid value for "setting": {0}.'.format(setting) - ) - if routing_type == 'allocation': - if value not in ['all', 'primaries', 'new_primaries', 'none']: - raise ValueError( - 'Invalid "value": {0} with "routing_type":' - '{1}.'.format(value, routing_type) - ) - elif routing_type == 'rebalance': - if value not in ['all', 'primaries', 'replicas', 'none']: - raise ValueError( - 'Invalid "value": {0} with "routing_type":' - '{1}.'.format(value, routing_type) - ) - else: - raise ValueError( - 'Invalid value for "routing_type": {0}.'.format(routing_type) - ) - bkey = 'cluster.routing.{0}.{1}'.format(routing_type, setting) - self.body = {'transient' : {bkey : value}} - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - self.loggit.info( - 'DRY-RUN: Update cluster routing settings with arguments: ' - '{0}'.format(self.body) - ) - - def do_action(self): - """ - Change cluster routing settings with the settings in `body`. - """ - self.loggit.info('Updating cluster settings: {0}'.format(self.body)) - try: - self.client.cluster.put_settings(body=self.body) - if self.wfc: - self.loggit.debug( - 'Waiting for shards to complete routing and/or rebalancing' - ) - utils.wait_for_it( - self.client, 'cluster_routing', - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - except Exception as err: - utils.report_failure(err) - -class CreateIndex(object): - """Create Index Action Class""" - def __init__(self, client, name, extra_settings={}, ignore_existing=False): - """ - :arg client: An :class:`elasticsearch.Elasticsearch` client object - :arg name: A name, which can contain :py:func:`time.strftime` - strings - :arg extra_settings: The `settings` and `mappings` for the index. For - more information see - https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html - :type extra_settings: dict, representing the settings and mappings. - :arg ignore_existing: If an index already exists, and this setting is ``True``, - ignore the 400 error that results in a `resource_already_exists_exception` and - return that it was successful. - """ - if not name: - raise exceptions.ConfigurationError('Value for "name" not provided.') - #: Instance variable. - #: The parsed version of `name` - self.name = utils.parse_date_pattern(name) - #: Instance variable. - #: Extracted from the action yaml, it should be a dictionary of - #: mappings and settings suitable for index creation. - self.body = extra_settings - #: Instance variable. - #: Extracted from the action yaml, it should be a boolean informing - #: whether to ignore the error if the index already exists. - self.ignore_existing = ignore_existing - #: Instance variable. - #: An :class:`elasticsearch.Elasticsearch` client object - self.client = client - self.loggit = logging.getLogger('curator.actions.create_index') - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. 
No changes will be made.') - self.loggit.info( - 'DRY-RUN: create_index "%s" with arguments: ' - '%s' % (self.name, self.body) - ) - - def do_action(self): - """ - Create index identified by `name` with settings in `body` - """ - self.loggit.info( - 'Creating index "{0}" with settings: ' - '{1}'.format(self.name, self.body) - ) - try: - self.client.indices.create(index=self.name, body=self.body) - # Most likely error is a 400, `resource_already_exists_exception` - except RequestError as err: - match_list = ["index_already_exists_exception", "resource_already_exists_exception"] - if err.error in match_list and self.ignore_existing: - self.loggit.warn('Index %s already exists.' % self.name) - else: - raise exceptions.FailedExecution('Index %s already exists.' % self.name) - except Exception as err: - utils.report_failure(err) - -class DeleteIndices(object): - """Delete Indices Action Class""" - def __init__(self, ilo, master_timeout=30): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg master_timeout: Number of seconds to wait for master node response - """ - utils.verify_index_list(ilo) - if not isinstance(master_timeout, int): - raise TypeError( - 'Incorrect type for "master_timeout": {0}. ' - 'Should be integer value.'.format(type(master_timeout)) - ) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: String value of `master_timeout` + 's', for seconds. - self.master_timeout = str(master_timeout) + 's' - self.loggit = logging.getLogger('curator.actions.delete_indices') - self.loggit.debug('master_timeout value: {0}'.format( - self.master_timeout)) - - def _verify_result(self, result, count): - """ - Breakout method to aid readability - :arg result: A list of indices from `_get_result_list` - :arg count: The number of tries that have occurred - :rtype: bool - """ - if isinstance(result, list) and result: - self.loggit.error( - 'The following indices failed to delete on try ' - '#{0}:'.format(count) - ) - for idx in result: - self.loggit.error("---{0}".format(idx)) - retval = False - else: - self.loggit.debug( - 'Successfully deleted all indices on try #{0}'.format(count) - ) - retval = True - return retval - - def __chunk_loop(self, chunk_list): - """ - Loop through deletes 3 times to ensure they complete - :arg chunk_list: A list of indices pre-chunked so it won't overload the - URL size limit. - """ - working_list = chunk_list - for count in range(1, 4): # Try 3 times - for i in working_list: - self.loggit.info("---deleting index {0}".format(i)) - self.client.indices.delete( - index=utils.to_csv(working_list), master_timeout=self.master_timeout) - result = [i for i in working_list if i in utils.get_indices(self.client)] - if self._verify_result(result, count): - return - else: - working_list = result - self.loggit.error( - 'Unable to delete the following indices after 3 attempts: ' - '{0}'.format(result) - ) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. 
- """ - utils.show_dry_run(self.index_list, 'delete_indices') - - def do_action(self): - """ - Delete indices in `index_list.indices` - """ - self.index_list.empty_list_check() - self.loggit.info( - 'Deleting {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.__chunk_loop(lst) - except Exception as err: - utils.report_failure(err) - -class ForceMerge(object): - """ForceMerge Action Class""" - def __init__(self, ilo, max_num_segments=None, delay=0): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg max_num_segments: Number of segments per shard to forceMerge - :arg delay: Number of seconds to delay between forceMerge operations - """ - utils.verify_index_list(ilo) - if not max_num_segments: - raise exceptions.MissingArgument('Missing value for "max_num_segments"') - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internally accessible copy of `max_num_segments` - self.max_num_segments = max_num_segments - #: Instance variable. - #: Internally accessible copy of `delay` - self.delay = delay - self.loggit = logging.getLogger('curator.actions.forcemerge') - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run( - self.index_list, 'forcemerge', - max_num_segments=self.max_num_segments, - delay=self.delay, - ) - - def do_action(self): - """ - forcemerge indices in `index_list.indices` - """ - self.index_list.filter_closed() - self.index_list.filter_forceMerged( - max_num_segments=self.max_num_segments) - self.index_list.empty_list_check() - self.loggit.info( - 'forceMerging {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - for index_name in self.index_list.indices: - self.loggit.info( - 'forceMerging index {0} to {1} segments per shard. ' - 'Please wait...'.format(index_name, self.max_num_segments) - ) - self.client.indices.forcemerge( - index=index_name, max_num_segments=self.max_num_segments) - if self.delay > 0: - self.loggit.info( - 'Pausing for {0} seconds before continuing...'.format(self.delay)) - time.sleep(self.delay) - except Exception as err: - utils.report_failure(err) - - -class IndexSettings(object): - """Index Settings Action Class""" - def __init__( - self, ilo, index_settings={}, ignore_unavailable=False, preserve_existing=False): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg index_settings: A dictionary structure with one or more index - settings to change. - :arg ignore_unavailable: Whether specified concrete indices should be - ignored when unavailable (missing or closed) - :arg preserve_existing: Whether to update existing settings. If set to - ``True`` existing settings on an index remain unchanged. The default - is ``False`` - """ - utils.verify_index_list(ilo) - if not index_settings: - raise exceptions.MissingArgument('Missing value for "index_settings"') - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internal reference to `index_settings` - self.body = index_settings - #: Instance variable. 
- #: Internal reference to `ignore_unavailable` - self.ignore_unavailable = ignore_unavailable - #: Instance variable. - #: Internal reference to `preserve_settings` - self.preserve_existing = preserve_existing - - self.loggit = logging.getLogger('curator.actions.index_settings') - self._body_check() - - def _body_check(self): - # The body only passes the skimpiest of requirements by having 'index' - # as the only root-level key, and having a 'dict' as its value - if len(self.body) == 1: - if 'index' in self.body: - if isinstance(self.body['index'], dict): - return True - raise exceptions.ConfigurationError( - 'Bad value for "index_settings": {0}'.format(self.body)) - - def _static_settings(self): - return [ - 'number_of_shards', - 'shard', - 'codec', - 'routing_partition_size', - ] - - def _dynamic_settings(self): - return [ - 'number_of_replicas', - 'auto_expand_replicas', - 'refresh_interval', - 'max_result_window', - 'max_rescore_window', - 'blocks', - 'max_refresh_listeners', - 'mapping', - 'merge', - 'translog', - ] - - def _settings_check(self): - # Detect if even one index is open. Save all found to open_index_list. - open_index_list = [] - open_indices = False - for idx in self.index_list.indices: - if self.index_list.index_info[idx]['state'] == 'open': - open_index_list.append(idx) - open_indices = True - for k in self.body['index']: - if k in self._static_settings(): - if not self.ignore_unavailable: - if open_indices: - raise exceptions.ActionError( - 'Static Setting "{0}" detected with open indices: ' - '{1}. Static settings can only be used with closed ' - 'indices. Recommend filtering out open indices, ' - 'or setting ignore_unavailable to True'.format( - k, open_index_list - ) - ) - elif k in self._dynamic_settings(): - # Dynamic settings should be appliable to open or closed indices - # Act here if the case is different for some settings. - pass - else: - self.loggit.warn( - '"{0}" is not a setting Curator recognizes and may or may ' - 'not work.'.format(k) - ) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run(self.index_list, 'indexsettings', **self.body) - - def do_action(self): - """Actually do the action""" - self._settings_check() - # Ensure that the open indices filter applied in _settings_check() - # didn't result in an empty list (or otherwise empty) - self.index_list.empty_list_check() - self.loggit.info( - 'Applying index settings to {0} indices: ' - '{1}'.format(len(self.index_list.indices), self.index_list.indices) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - response = self.client.indices.put_settings( - index=utils.to_csv(lst), body=self.body, - ignore_unavailable=self.ignore_unavailable, - preserve_existing=self.preserve_existing - ) - self.loggit.debug('PUT SETTINGS RESPONSE: {0}'.format(response)) - except Exception as err: - utils.report_failure(err) - - -class Open(object): - """Open Action Class""" - def __init__(self, ilo): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - """ - utils.verify_index_list(ilo) - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - self.loggit = logging.getLogger('curator.actions.open') - - def do_dry_run(self): - """ - Log what the output would be, but take no action. 
- """ - utils.show_dry_run(self.index_list, 'open') - - def do_action(self): - """ - Open closed indices in `index_list.indices` - """ - self.index_list.empty_list_check() - self.loggit.info( - 'Opening {0} selected indices: {1}'.format( - len(self.index_list.indices), - self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.client.indices.open(index=utils.to_csv(lst)) - except Exception as err: - utils.report_failure(err) - -class Replicas(object): - """Replica Action Class""" - def __init__( - self, ilo, count=None, wait_for_completion=False, wait_interval=9, max_wait=-1): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg count: The count of replicas per shard - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `False`) - :type wait_for_completion: bool - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - """ - utils.verify_index_list(ilo) - # It's okay for count to be zero - if count == 0: - pass - elif not count: - raise exceptions.MissingArgument('Missing value for "count"') - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internally accessible copy of `count` - self.count = count - #: Instance variable. - #: Internal reference to `wait_for_completion` - self.wfc = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - self.loggit = logging.getLogger('curator.actions.replicas') - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - utils.show_dry_run(self.index_list, 'replicas', count=self.count) - - def do_action(self): - """ - Update the replica count of indices in `index_list.indices` - """ - self.loggit.debug( - 'Cannot get update replica count of closed indices. ' - 'Omitting any closed indices.' - ) - self.index_list.filter_closed() - self.index_list.empty_list_check() - self.loggit.info( - 'Setting the replica count to {0} for {1} indices: ' - '{2}'.format(self.count, len(self.index_list.indices), self.index_list.indices) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - self.client.indices.put_settings( - index=utils.to_csv(lst), - body={'number_of_replicas': self.count} - ) - if self.wfc and self.count > 0: - self.loggit.debug( - 'Waiting for shards to complete replication for ' - 'indices: {0}'.format(utils.to_csv(lst)) - ) - utils.wait_for_it( - self.client, 'replicas', - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - except Exception as err: - utils.report_failure(err) - -class Rollover(object): - """Rollover Action Class""" - def __init__( - self, client, name, conditions, new_index=None, extra_settings=None, - wait_for_active_shards=1 - ): - """ - :arg client: An :class:`elasticsearch.Elasticsearch` client object - :arg name: The name of the single-index-mapped alias to test for - rollover conditions. 
- :new_index: The new index name - :arg conditions: A dictionary of conditions to test - :arg extra_settings: Must be either `None`, or a dictionary of settings - to apply to the new index on rollover. This is used in place of - `settings` in the Rollover API, mostly because it's already existent - in other places here in Curator - :arg wait_for_active_shards: The number of shards expected to be active - before returning. - """ - self.loggit = logging.getLogger('curator.actions.rollover') - if not isinstance(conditions, dict): - raise exceptions.ConfigurationError('"conditions" must be a dictionary') - else: - self.loggit.debug('"conditions" is {0}'.format(conditions)) - if not isinstance(extra_settings, dict) and extra_settings is not None: - raise exceptions.ConfigurationError( - '"extra_settings" must be a dictionary or None') - utils.verify_client_object(client) - #: Instance variable. - #: The Elasticsearch Client object - self.client = client - #: Instance variable. - #: Internal reference to `conditions` - self.conditions = self._check_max_size(conditions) - #: Instance variable. - #: Internal reference to `extra_settings` - self.settings = extra_settings - #: Instance variable. - #: Internal reference to `new_index` - self.new_index = utils.parse_date_pattern(new_index) if new_index else new_index - #: Instance variable. - #: Internal reference to `wait_for_active_shards` - self.wait_for_active_shards = wait_for_active_shards - - # Verify that `conditions` and `settings` are good? - # Verify that `name` is an alias, and is only mapped to one index. - if utils.rollable_alias(client, name): - self.name = name - else: - raise ValueError( - 'Unable to perform index rollover with alias ' - '"{0}". See previous logs for more details.'.format(name) - ) - - def _check_max_size(self, conditions): - """ - Ensure that if ``max_size`` is specified, that ``self.client`` - is running 6.1 or higher. - """ - if 'max_size' in conditions: - version = utils.get_version(self.client) - if version < (6, 1, 0): - raise exceptions.ConfigurationError( - 'Your version of elasticsearch ({0}) does not support ' - 'the max_size rollover condition. It is only supported ' - 'in versions 6.1.0 and up.'.format(version) - ) - return conditions - - def body(self): - """ - Create a body from conditions and settings - """ - retval = {} - retval['conditions'] = self.conditions - if self.settings: - retval['settings'] = self.settings - return retval - - def log_result(self, result): - """ - Log the results based on whether the index rolled over or not - """ - dryrun_string = '' - if result['dry_run']: - dryrun_string = 'DRY-RUN: ' - self.loggit.debug('{0}Result: {1}'.format(dryrun_string, result)) - rollover_string = '{0}Old index {1} rolled over to new index {2}'.format( - dryrun_string, - result['old_index'], - result['new_index'] - ) - # Success is determined by at one condition being True - success = False - for k in list(result['conditions'].keys()): - if result['conditions'][k]: - success = True - if result['dry_run'] and success: # log "successful" dry-run - self.loggit.info(rollover_string) - elif result['rolled_over']: - self.loggit.info(rollover_string) - else: - self.loggit.info( - '{0}Rollover conditions not met. 
Index {1} not rolled over.'.format( - dryrun_string, - result['old_index']) - ) - - def doit(self, dry_run=False): - """ - This exists solely to prevent having to have duplicate code in both - `do_dry_run` and `do_action` - """ - return self.client.indices.rollover( - alias=self.name, - new_index=self.new_index, - body=self.body(), - dry_run=dry_run, - wait_for_active_shards=self.wait_for_active_shards, - ) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - self.log_result(self.doit(dry_run=True)) - - def do_action(self): - """ - Rollover the index referenced by alias `name` - """ - self.loggit.info('Performing index rollover') - try: - self.log_result(self.doit()) - except Exception as err: - utils.report_failure(err) - -class DeleteSnapshots(object): - """Delete Snapshots Action Class""" - def __init__(self, slo, retry_interval=120, retry_count=3): - """ - :arg slo: A :class:`curator.snapshotlist.SnapshotList` object - :arg retry_interval: Number of seconds to delay betwen retries. Default: - 120 (seconds) - :arg retry_count: Number of attempts to make. Default: 3 - """ - utils.verify_snapshot_list(slo) - #: Instance variable. - #: The Elasticsearch Client object derived from `slo` - self.client = slo.client - #: Instance variable. - #: Internally accessible copy of `retry_interval` - self.retry_interval = retry_interval - #: Instance variable. - #: Internally accessible copy of `retry_count` - self.retry_count = retry_count - #: Instance variable. - #: Internal reference to `slo` - self.snapshot_list = slo - #: Instance variable. - #: The repository name derived from `slo` - self.repository = slo.repository - self.loggit = logging.getLogger('curator.actions.delete_snapshots') - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - mykwargs = { - 'repository' : self.repository, - 'retry_interval' : self.retry_interval, - 'retry_count' : self.retry_count, - } - for snap in self.snapshot_list.snapshots: - self.loggit.info( - 'DRY-RUN: delete_snapshot: {0} with arguments: {1}'.format(snap, mykwargs)) - - def do_action(self): - """ - Delete snapshots in `slo` - Retry up to `retry_count` times, pausing `retry_interval` - seconds between retries. 
- """ - self.snapshot_list.empty_list_check() - self.loggit.info( - 'Deleting {0} selected snapshots: {1}'.format( - len(self.snapshot_list.snapshots), - self.snapshot_list.snapshots - ) - ) - if not utils.safe_to_snap( - self.client, repository=self.repository, - retry_interval=self.retry_interval, retry_count=self.retry_count - ): - raise exceptions.FailedExecution( - 'Unable to delete snapshot(s) because a snapshot is in ' - 'state "IN_PROGRESS"') - try: - for snap in self.snapshot_list.snapshots: - self.loggit.info('Deleting snapshot {0}...'.format(snap)) - self.client.snapshot.delete( - repository=self.repository, snapshot=snap) - except Exception as err: - utils.report_failure(err) - -class Reindex(object): - """Reindex Action Class""" - def __init__( - self, ilo, request_body, refresh=True, requests_per_second=-1, slices=1, timeout=60, - wait_for_active_shards=1, wait_for_completion=True, max_wait=-1, wait_interval=9, - remote_url_prefix=None, remote_ssl_no_validate=None, remote_certificate=None, - remote_client_cert=None, remote_client_key=None, remote_aws_key=None, - remote_aws_secret_key=None, remote_aws_region=None, remote_filters={}, - migration_prefix='', migration_suffix='' - ): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg request_body: The body to send to - :py:meth:`elasticsearch.Elasticsearch.reindex`, which must be complete and - usable, as Curator will do no vetting of the request_body. If it - fails to function, Curator will return an exception. - :arg refresh: Whether to refresh the entire target index after the - operation is complete. (default: `True`) - :type refresh: bool - :arg requests_per_second: The throttle to set on this request in - sub-requests per second. ``-1`` means set no throttle as does - ``unlimited`` which is the only non-float this accepts. (default: - ``-1``) - :arg slices: The number of slices this task should be divided into. 1 - means the task will not be sliced into subtasks. (default: ``1``) - :arg timeout: The length in seconds each individual bulk request should - wait for shards that are unavailable. (default: ``60``) - :arg wait_for_active_shards: Sets the number of shard copies that must - be active before proceeding with the reindex operation. (default: - ``1``) means the primary shard only. Set to ``all`` for all shard - copies, otherwise set to any non-negative value less than or equal - to the total number of copies for the shard (number of replicas + 1) - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `True`) - :type wait_for_completion: bool - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - :arg remote_url_prefix: `Optional` url prefix, if needed to reach the - Elasticsearch API (i.e., it's not at the root level) - :type remote_url_prefix: str - :arg remote_ssl_no_validate: If `True`, do not validate the certificate - chain. This is an insecure option and you will see warnings in the - log output. 
- :type remote_ssl_no_validate: bool - :arg remote_certificate: Path to SSL/TLS certificate - :arg remote_client_cert: Path to SSL/TLS client certificate (public key) - :arg remote_client_key: Path to SSL/TLS private key - :arg remote_aws_key: AWS IAM Access Key (Only used if the - :mod:`requests-aws4auth` python module is installed) - :arg remote_aws_secret_key: AWS IAM Secret Access Key (Only used if the - :mod:`requests-aws4auth` python module is installed) - :arg remote_aws_region: AWS Region (Only used if the - :mod:`requests-aws4auth` python module is installed) - :arg remote_filters: Apply these filters to the remote client for - remote index selection. - :arg migration_prefix: When migrating, prepend this value to the index - name. - :arg migration_suffix: When migrating, append this value to the index - name. - """ - self.loggit = logging.getLogger('curator.actions.reindex') - utils.verify_index_list(ilo) - # Normally, we'd check for an empty list here. But since we can reindex - # from remote, we might just be starting with an empty one. - # ilo.empty_list_check() - if not isinstance(request_body, dict): - raise exceptions.ConfigurationError('"request_body" is not of type dictionary') - #: Instance variable. - #: Internal reference to `request_body` - self.body = request_body - self.loggit.debug('REQUEST_BODY = {0}'.format(request_body)) - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internal reference to `refresh` - self.refresh = refresh - #: Instance variable. - #: Internal reference to `requests_per_second` - self.requests_per_second = requests_per_second - #: Instance variable. - #: Internal reference to `slices` - self.slices = slices - #: Instance variable. - #: Internal reference to `timeout`, and add "s" for seconds. - self.timeout = '{0}s'.format(timeout) - #: Instance variable. - #: Internal reference to `wait_for_active_shards` - self.wait_for_active_shards = wait_for_active_shards - #: Instance variable. - #: Internal reference to `wait_for_completion` - self.wfc = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - #: Instance variable. - #: Internal reference to `migration_prefix` - self.mpfx = migration_prefix - #: Instance variable. - #: Internal reference to `migration_suffix` - self.msfx = migration_suffix - - # This is for error logging later... - self.remote = False - if 'remote' in self.body['source']: - self.remote = True - - self.migration = False - if self.body['dest']['index'] == 'MIGRATION': - self.migration = True - - if self.migration: - if not self.remote and not self.mpfx and not self.msfx: - raise exceptions.ConfigurationError( - 'MIGRATION can only be used locally with one or both of ' - 'migration_prefix or migration_suffix.' - ) - - # REINDEX_SELECTION is the designated token. If you use this for the - # source "index," it will be replaced with the list of indices from the - # provided 'ilo' (index list object). 
- if self.body['source']['index'] == 'REINDEX_SELECTION' \ - and not self.remote: - self.body['source']['index'] = self.index_list.indices - - # Remote section - elif self.remote: - self.loggit.debug('Remote reindex request detected') - if 'host' not in self.body['source']['remote']: - raise exceptions.ConfigurationError('Missing remote "host"') - rclient_info = {} - for k in ['host', 'username', 'password']: - rclient_info[k] = self.body['source']['remote'][k] \ - if k in self.body['source']['remote'] else None - rhost = rclient_info['host'] - try: - # Save these for logging later - _ = rhost.split(':') - self.remote_port = _[2] - self.remote_host = _[1][2:] - except Exception as err: - raise exceptions.ConfigurationError( - 'Host must be in the form [scheme]://[host]:[port] but ' - 'was [{0}]'.format(rhost) - ) - rhttp_auth = '{0}:{1}'.format( - rclient_info['username'], rclient_info['password']) \ - if (rclient_info['username'] and rclient_info['password']) else None - if rhost[:5] == 'http:': - use_ssl = False - elif rhost[:5] == 'https': - use_ssl = True - else: - raise exceptions.ConfigurationError( - 'Host must be in URL format. You provided: ' - '{0}'.format(rclient_info['host']) - ) - - # Let's set a decent remote timeout for initially reading - # the indices on the other side, and collecting their metadata - remote_timeout = 180 - - # The rest only applies if using filters for remote indices - if self.body['source']['index'] == 'REINDEX_SELECTION': - self.loggit.debug('Filtering indices from remote') - from .indexlist import IndexList - self.loggit.debug( - 'Remote client args: ' - 'host={0} ' - 'http_auth={1} ' - 'url_prefix={2} ' - 'use_ssl={3} ' - 'ssl_no_validate={4} ' - 'certificate={5} ' - 'client_cert={6} ' - 'client_key={7} ' - 'aws_key={8} ' - 'aws_secret_key={9} ' - 'aws_region={10} ' - 'timeout={11} ' - 'skip_version_test=True'.format( - rhost, - rhttp_auth, - remote_url_prefix, - use_ssl, - remote_ssl_no_validate, - remote_certificate, - remote_client_cert, - remote_client_key, - remote_aws_key, - remote_aws_secret_key, - remote_aws_region, - remote_timeout - ) - ) - - try: # let's try to build a remote connection with these! - rclient = utils.get_client( - host=rhost, - http_auth=rhttp_auth, - url_prefix=remote_url_prefix, - use_ssl=use_ssl, - ssl_no_validate=remote_ssl_no_validate, - certificate=remote_certificate, - client_cert=remote_client_cert, - client_key=remote_client_key, - aws_key=remote_aws_key, - aws_secret_key=remote_aws_secret_key, - aws_region=remote_aws_region, - skip_version_test=True, - timeout=remote_timeout - ) - except Exception as err: - self.loggit.error( - 'Unable to establish connection to remote Elasticsearch' - ' with provided credentials/certificates/settings.' - ) - utils.report_failure(err) - try: - rio = IndexList(rclient) - rio.iterate_filters({'filters': remote_filters}) - try: - rio.empty_list_check() - except exceptions.NoIndices: - raise exceptions.FailedExecution( - 'No actionable remote indices selected after ' - 'applying filters.' - ) - self.body['source']['index'] = rio.indices - except Exception as err: - self.loggit.error( - 'Unable to get/filter list of remote indices.' 
- ) - utils.report_failure(err) - - self.loggit.debug( - 'Reindexing indices: {0}'.format(self.body['source']['index'])) - - def _get_request_body(self, source, dest): - body = deepcopy(self.body) - body['source']['index'] = source - body['dest']['index'] = dest - return body - - def _get_reindex_args(self, source, dest): - # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its - # thing if wait_for_completion is set to True. Report the task_id - # either way. - reindex_args = { - 'body':self._get_request_body(source, dest), 'refresh':self.refresh, - 'requests_per_second': self.requests_per_second, - 'timeout': self.timeout, - 'wait_for_active_shards': self.wait_for_active_shards, - 'wait_for_completion': False, - 'slices': self.slices - } - version = utils.get_version(self.client) - if version < (5, 1, 0): - self.loggit.info( - 'Your version of elasticsearch ({0}) does not support ' - 'sliced scroll for reindex, so that setting will not be ' - 'used'.format(version) - ) - del reindex_args['slices'] - return reindex_args - - def get_processed_items(self, task_id): - """ - This function calls client.tasks.get with the provided `task_id`. It will get the value - from ``'response.total'`` as the total number of elements processed during reindexing. - If the value is not found, it will return -1 - - :arg task_id: A task_id which ostensibly matches a task searchable in the - tasks API. - """ - try: - task_data = self.client.tasks.get(task_id=task_id) - except Exception as err: - raise exceptions.CuratorException( - 'Unable to obtain task information for task_id "{0}". Exception ' - '{1}'.format(task_id, err) - ) - total_processed_items = -1 - task = task_data['task'] - if task['action'] == 'indices:data/write/reindex': - self.loggit.debug('It\'s a REINDEX TASK') - self.loggit.debug('TASK_DATA: {0}'.format(task_data)) - self.loggit.debug('TASK_DATA keys: {0}'.format(list(task_data.keys()))) - if 'response' in task_data: - response = task_data['response'] - total_processed_items = response['total'] - self.loggit.debug('total_processed_items = {0}'.format(total_processed_items)) - - return total_processed_items - - def _post_run_quick_check(self, index_name, task_id): - # Check whether any documents were processed - # if no documents processed, the target index "dest" won't exist - processed_items = self.get_processed_items(task_id) - if processed_items == 0: - self.loggit.info( - 'No items were processed. Will not check if target index "{0}" ' - 'exists'.format(index_name) - ) - else: - # Verify the destination index is there after the fact - index_exists = self.client.indices.exists(index=index_name) - alias_instead = self.client.indices.exists_alias(name=index_name) - if not index_exists and not alias_instead: - self.loggit.error( - 'The index described as "{0}" was not found after the reindex ' - 'operation. Check Elasticsearch logs for more ' - 'information.'.format(index_name) - ) - if self.remote: - self.loggit.error( - 'Did you forget to add "reindex.remote.whitelist: ' - '{0}:{1}" to the elasticsearch.yml file on the ' - '"dest" node?'.format( - self.remote_host, self.remote_port - ) - ) - raise exceptions.FailedExecution( - 'Reindex failed. 
The index or alias identified by "{0}" was ' - 'not found.'.format(index_name) - ) - - def sources(self): - """Generator for sources & dests""" - dest = self.body['dest']['index'] - source_list = utils.ensure_list(self.body['source']['index']) - self.loggit.debug('source_list: {0}'.format(source_list)) - if not source_list or source_list == ['REINDEX_SELECTED']: # Empty list - raise exceptions.NoIndices - if not self.migration: - yield self.body['source']['index'], dest - - # Loop over all sources (default will only be one) - else: - for source in source_list: - if self.migration: - dest = self.mpfx + source + self.msfx - yield source, dest - - def show_run_args(self, source, dest): - """ - Show what will run - """ - - return ( - 'request body: {0} with arguments: ' - 'refresh={1} ' - 'requests_per_second={2} ' - 'slices={3} ' - 'timeout={4} ' - 'wait_for_active_shards={5} ' - 'wait_for_completion={6}'.format( - self._get_request_body(source, dest), - self.refresh, - self.requests_per_second, - self.slices, - self.timeout, - self.wait_for_active_shards, - self.wfc - ) - ) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - for source, dest in self.sources(): - self.loggit.info( - 'DRY-RUN: REINDEX: {0}'.format(self.show_run_args(source, dest)) - ) - - def do_action(self): - """ - Execute :py:meth:`elasticsearch.Elasticsearch.reindex` operation with the - provided request_body and arguments. - """ - try: - # Loop over all sources (default will only be one) - for source, dest in self.sources(): - self.loggit.info('Commencing reindex operation') - self.loggit.debug( - 'REINDEX: {0}'.format(self.show_run_args(source, dest))) - response = self.client.reindex(**self._get_reindex_args(source, dest)) - - self.loggit.debug('TASK ID = {0}'.format(response['task'])) - if self.wfc: - utils.wait_for_it( - self.client, 'reindex', task_id=response['task'], - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - self._post_run_quick_check(dest, response['task']) - - else: - self.loggit.warn( - '"wait_for_completion" set to {0}. Remember ' - 'to check task_id "{1}" for successful completion ' - 'manually.'.format(self.wfc, response['task']) - ) - except exceptions.NoIndices as err: - raise exceptions.NoIndices( - 'Source index must be list of actual indices. ' - 'It must not be an empty list.') - except Exception as err: - utils.report_failure(err) - - -class Snapshot(object): - """Snapshot Action Class""" - def __init__( - self, ilo, repository=None, name=None, ignore_unavailable=False, - include_global_state=True, partial=False, wait_for_completion=True, wait_interval=9, - max_wait=-1, skip_repo_fs_check=False - ): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg repository: The Elasticsearch snapshot repository to use - :arg name: What to name the snapshot. - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `True`) - :type wait_for_completion: bool - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - :arg ignore_unavailable: Ignore unavailable shards/indices. - (default: `False`) - :type ignore_unavailable: bool - :arg include_global_state: Store cluster global state with snapshot. - (default: `True`) - :type include_global_state: bool - :arg partial: Do not fail if primary shard is unavailable. 
(default: - `False`) - :type partial: bool - :arg skip_repo_fs_check: Do not validate write access to repository on - all cluster nodes before proceeding. (default: `False`). Useful for - shared filesystems where intermittent timeouts can affect - validation, but won't likely affect snapshot success. - :type skip_repo_fs_check: bool - """ - utils.verify_index_list(ilo) - # Check here and don't bother with the rest of this if there are no - # indices in the index list. - ilo.empty_list_check() - if not utils.repository_exists(ilo.client, repository=repository): - raise exceptions.ActionError( - 'Cannot snapshot indices to missing repository: ' - '{0}'.format(repository) - ) - if not name: - raise exceptions.MissingArgument('No value for "name" provided.') - #: Instance variable. - #: The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. - #: The parsed version of `name` - self.name = utils.parse_datemath(self.client, utils.parse_date_pattern(name)) - #: Instance variable. - #: Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. - #: Internally accessible copy of `repository` - self.repository = repository - #: Instance variable. - #: Internally accessible copy of `wait_for_completion` - self.wait_for_completion = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - #: Instance variable. - #: Internally accessible copy of `skip_repo_fs_check` - self.skip_repo_fs_check = skip_repo_fs_check - self.state = None - - #: Instance variable. - #: Populated at instance creation time by calling - #: :mod:`curator.utils.utils.create_snapshot_body` with `ilo.indices` and the - #: provided arguments: `ignore_unavailable`, `include_global_state`, - #: `partial` - self.body = utils.create_snapshot_body( - ilo.indices, - ignore_unavailable=ignore_unavailable, - include_global_state=include_global_state, - partial=partial - ) - - self.loggit = logging.getLogger('curator.actions.snapshot') - - def get_state(self): - """ - Get the state of the snapshot - """ - try: - self.state = self.client.snapshot.get( - repository=self.repository, - snapshot=self.name)['snapshots'][0]['state'] - return self.state - except IndexError: - raise exceptions.CuratorException( - 'Snapshot "{0}" not found in repository ' - '"{1}"'.format(self.name, self.repository) - ) - - def report_state(self): - """ - Log the state of the snapshot and raise an exception if the state is - not ``SUCCESS`` - """ - self.get_state() - if self.state == 'SUCCESS': - self.loggit.info('Snapshot {0} successfully completed.'.format(self.name)) - else: - msg = 'Snapshot {0} completed with state: {0}'.format(self.state) - self.loggit.error(msg) - raise exceptions.FailedSnapshot(msg) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - self.loggit.info( - 'DRY-RUN: snapshot: {0} in repository {1} with arguments: ' - '{2}'.format(self.name, self.repository, self.body) - ) - - def do_action(self): - """ - Snapshot indices in `index_list.indices`, with options passed. 
- """ - if not self.skip_repo_fs_check: - utils.test_repo_fs(self.client, self.repository) - if utils.snapshot_running(self.client): - raise exceptions.SnapshotInProgress('Snapshot already in progress.') - try: - self.loggit.info( - 'Creating snapshot "{0}" from indices: {1}'.format( - self.name, self.index_list.indices - ) - ) - # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its - # thing if wait_for_completion is set to True. Report the task_id - # either way. - self.client.snapshot.create( - repository=self.repository, snapshot=self.name, body=self.body, - wait_for_completion=False - ) - if self.wait_for_completion: - utils.wait_for_it( - self.client, 'snapshot', snapshot=self.name, - repository=self.repository, - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - self.report_state() - else: - self.loggit.warn( - '"wait_for_completion" set to {0}.' - 'Remember to check for successful completion ' - 'manually.'.format(self.wait_for_completion) - ) - except Exception as err: - utils.report_failure(err) - -class Restore(object): - """Restore Action Class""" - def __init__( - self, slo, name=None, indices=None, include_aliases=False, ignore_unavailable=False, - include_global_state=False, partial=False, rename_pattern=None, - rename_replacement=None, extra_settings={}, wait_for_completion=True, wait_interval=9, - max_wait=-1, skip_repo_fs_check=False - ): - """ - :arg slo: A :class:`curator.snapshotlist.SnapshotList` object - :arg name: Name of the snapshot to restore. If no name is provided, it - will restore the most recent snapshot by age. - :type name: str - :arg indices: A list of indices to restore. If no indices are provided, - it will restore all indices in the snapshot. - :type indices: list - :arg include_aliases: If set to `True`, restore aliases with the - indices. (default: `False`) - :type include_aliases: bool - :arg ignore_unavailable: Ignore unavailable shards/indices. - (default: `False`) - :type ignore_unavailable: bool - :arg include_global_state: Restore cluster global state with snapshot. - (default: `False`) - :type include_global_state: bool - :arg partial: Do not fail if primary shard is unavailable. (default: - `False`) - :type partial: bool - :arg rename_pattern: A regular expression pattern with one or more - captures, e.g. ``index_(.+)`` - :type rename_pattern: str - :arg rename_replacement: A target index name pattern with `$#` numbered - references to the captures in ``rename_pattern``, e.g. - ``restored_index_$1`` - :type rename_replacement: str - :arg extra_settings: Extra settings, including shard count and settings - to omit. For more information see - https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-restore-snapshot.html#change-index-settings-during-restore - :type extra_settings: dict, representing the settings. - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. (default: `True`) - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - :type wait_for_completion: bool - - :arg skip_repo_fs_check: Do not validate write access to repository on - all cluster nodes before proceeding. (default: `False`). Useful for - shared filesystems where intermittent timeouts can affect - validation, but won't likely affect snapshot success. 
- :type skip_repo_fs_check: bool - """ - self.loggit = logging.getLogger('curator.actions.snapshot') - utils.verify_snapshot_list(slo) - # Get the most recent snapshot. - most_recent = slo.most_recent() - self.loggit.debug('"most_recent" snapshot: {0}'.format(most_recent)) - #: Instance variable. - #: Will use a provided snapshot name, or the most recent snapshot in slo - self.name = name if name else most_recent - # Stop here now, if it's not a successful snapshot. - if slo.snapshot_info[self.name]['state'] == 'PARTIAL' and partial: - self.loggit.warn( - 'Performing restore of snapshot in state PARTIAL.') - elif slo.snapshot_info[self.name]['state'] != 'SUCCESS': - raise exceptions.CuratorException( - 'Restore operation can only be performed on snapshots with ' - 'state "SUCCESS", or "PARTIAL" if partial=True.' - ) - #: Instance variable. - #: The Elasticsearch Client object derived from `slo` - self.client = slo.client - #: Instance variable. - #: Internal reference to `slo` - self.snapshot_list = slo - #: Instance variable. - #: `repository` derived from `slo` - self.repository = slo.repository - - if indices: - self.indices = utils.ensure_list(indices) - else: - self.indices = slo.snapshot_info[self.name]['indices'] - self.wfc = wait_for_completion - #: Instance variable - #: How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. - #: How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - #: Instance variable version of ``rename_pattern`` - self.rename_pattern = rename_pattern if rename_replacement is not None \ - else '' - #: Instance variable version of ``rename_replacement`` - self.rename_replacement = rename_replacement if rename_replacement \ - is not None else '' - #: Also an instance variable version of ``rename_replacement`` - #: but with Java regex group designations of ``$#`` - #: converted to Python's ``\\#`` style. - self.py_rename_replacement = self.rename_replacement.replace('$', '\\') - #: Instance variable. - #: Internally accessible copy of `skip_repo_fs_check` - self.skip_repo_fs_check = skip_repo_fs_check - - #: Instance variable. - #: Populated at instance creation time from the other options - self.body = { - 'indices' : self.indices, - 'include_aliases' : include_aliases, - 'ignore_unavailable' : ignore_unavailable, - 'include_global_state' : include_global_state, - 'partial' : partial, - 'rename_pattern' : self.rename_pattern, - 'rename_replacement' : self.rename_replacement, - } - if extra_settings: - self.loggit.debug( - 'Adding extra_settings to restore body: ' - '{0}'.format(extra_settings) - ) - try: - self.body.update(extra_settings) - except: - self.loggit.error( - 'Unable to apply extra settings to restore body') - self.loggit.debug('REPOSITORY: {0}'.format(self.repository)) - self.loggit.debug('WAIT_FOR_COMPLETION: {0}'.format(self.wfc)) - self.loggit.debug( - 'SKIP_REPO_FS_CHECK: {0}'.format(self.skip_repo_fs_check)) - self.loggit.debug('BODY: {0}'.format(self.body)) - # Populate the expected output index list. 
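For illustration only (not part of the deleted file): how the rename handling above behaves. A Java-style "$1" backreference in rename_replacement is converted to Python's "\1" style before _get_expected_output() applies re.sub(); the pattern and replacement here are the examples given in the docstring above, and the index name is hypothetical.

    # Illustrative sketch, not part of the patch.
    import re

    rename_pattern = 'index_(.+)'
    rename_replacement = 'restored_index_$1'
    # Same conversion performed in the constructor above: '$1' -> '\1'
    py_rename_replacement = rename_replacement.replace('$', '\\')
    restored = re.sub(rename_pattern, py_rename_replacement, 'index_2024.01.01')
    print(restored)  # restored_index_2024.01.01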
- self._get_expected_output() - - def _get_expected_output(self): - if not self.rename_pattern and not self.rename_replacement: - self.expected_output = self.indices - return # Don't stick around if we're not replacing anything - self.expected_output = [] - for index in self.indices: - self.expected_output.append( - re.sub( - self.rename_pattern, - self.py_rename_replacement, - index - ) - ) - self.loggit.debug('index: {0} replacement: {1}'.format(index, self.expected_output[-1])) - - def report_state(self): - """ - Log the state of the restore - This should only be done if ``wait_for_completion`` is `True`, and only - after completing the restore. - """ - all_indices = utils.get_indices(self.client) - found_count = 0 - missing = [] - for index in self.expected_output: - if index in all_indices: - found_count += 1 - self.loggit.info('Found restored index {0}'.format(index)) - else: - missing.append(index) - if found_count == len(self.expected_output): - self.loggit.info('All indices appear to have been restored.') - else: - msg = ( - 'Some of the indices do not appear to have been restored. Missing: ' - '{0}'.format(missing) - ) - self.loggit.error(msg) - raise exceptions.FailedRestore(msg) - - def do_dry_run(self): - """ - Log what the output would be, but take no action. - """ - self.loggit.info('DRY-RUN MODE. No changes will be made.') - self.loggit.info( - 'DRY-RUN: restore: Repository: {0} Snapshot name: {1} Arguments: ' - '{2}'.format( - self.repository, self.name, - {'wait_for_completion' : self.wfc, 'body' : self.body} - ) - ) - - for index in self.indices: - if self.rename_pattern and self.rename_replacement: - replacement_msg = 'as {0}'.format( - re.sub( - self.rename_pattern, - self.py_rename_replacement, - index - ) - ) - else: - replacement_msg = '' - self.loggit.info( - 'DRY-RUN: restore: Index {0} {1}'.format(index, replacement_msg) - ) - - def do_action(self): - """ - Restore indices with options passed. - """ - if not self.skip_repo_fs_check: - utils.test_repo_fs(self.client, self.repository) - if utils.snapshot_running(self.client): - raise exceptions.SnapshotInProgress('Cannot restore while a snapshot is in progress.') - try: - self.loggit.info( - 'Restoring indices "{0}" from snapshot: {1}'.format(self.indices, self.name) - ) - # Always set wait_for_completion to False. Let 'utils.wait_for_it' do its - # thing if wait_for_completion is set to True. Report the task_id - # either way. - self.client.snapshot.restore( - repository=self.repository, snapshot=self.name, body=self.body, - wait_for_completion=False - ) - if self.wfc: - utils.wait_for_it( - self.client, 'restore', index_list=self.expected_output, - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - self.report_state() - else: - self.loggit.warn( - '"wait_for_completion" set to {0}. 
' - 'Remember to check for successful completion ' - 'manually.'.format(self.wfc) - ) - except Exception as err: - utils.report_failure(err) - -class Shrink(object): - """Shrink Action Class""" - def __init__( - self, ilo, shrink_node='DETERMINISTIC', node_filters={}, number_of_shards=1, - number_of_replicas=1, shrink_prefix='', shrink_suffix='-shrink', copy_aliases=False, - delete_after=True, post_allocation={}, wait_for_active_shards=1, - wait_for_rebalance=True, extra_settings={}, wait_for_completion=True, wait_interval=9, - max_wait=-1 - ): - """ - :arg ilo: A :class:`curator.indexlist.IndexList` object - :arg shrink_node: The node name to use as the shrink target, or - ``DETERMINISTIC``, which will use the values in ``node_filters`` to - determine which node will be the shrink node. - :arg node_filters: If the value of ``shrink_node`` is ``DETERMINISTIC``, - the values in ``node_filters`` will be used while determining which - node to allocate the shards on before performing the shrink. - :type node_filters: dict, representing the filters - :arg number_of_shards: The number of shards the shrunk index should have - :arg number_of_replicas: The number of replicas for the shrunk index - :arg shrink_prefix: Prepend the shrunk index with this value - :arg shrink_suffix: Append the value to the shrunk index (default: `-shrink`) - :arg copy_aliases: Whether to copy each source index aliases to target index after - shrinking. The aliases will be added to target index and deleted from source index at - the same time(default: `False`) - :type copy_aliases: bool - :arg delete_after: Whether to delete each index after shrinking. (default: `True`) - :type delete_after: bool - :arg post_allocation: If populated, the `allocation_type`, `key`, and - `value` will be applied to the shrunk index to re-route it. - :type post_allocation: dict, with keys `allocation_type`, `key`, and `value` - :arg wait_for_active_shards: The number of shards expected to be active before returning. - :arg extra_settings: Permitted root keys are `settings` and `aliases`. - :type extra_settings: dict - :arg wait_for_rebalance: Wait for rebalance. (default: `True`) - :type wait_for_rebalance: bool - :arg wait_for_active_shards: Wait for active shards before returning. - :arg wait_for_completion: Wait (or not) for the operation - to complete before returning. You should not normally change this, - ever. (default: `True`) - :arg wait_interval: How long in seconds to wait between checks for - completion. - :arg max_wait: Maximum number of seconds to `wait_for_completion` - :type wait_for_completion: bool - """ - self.loggit = logging.getLogger('curator.actions.shrink') - utils.verify_index_list(ilo) - if 'permit_masters' not in node_filters: - node_filters['permit_masters'] = False - #: Instance variable. The Elasticsearch Client object derived from `ilo` - self.client = ilo.client - #: Instance variable. Internal reference to `ilo` - self.index_list = ilo - #: Instance variable. Internal reference to `shrink_node` - self.shrink_node = shrink_node - #: Instance variable. Internal reference to `node_filters` - self.node_filters = node_filters - #: Instance variable. Internal reference to `shrink_prefix` - self.shrink_prefix = shrink_prefix - #: Instance variable. Internal reference to `shrink_suffix` - self.shrink_suffix = shrink_suffix - #: Instance variable. Internal reference to `copy_aliases` - self.copy_aliases = copy_aliases - #: Instance variable. 
Internal reference to `delete_after` - self.delete_after = delete_after - #: Instance variable. Internal reference to `post_allocation` - self.post_allocation = post_allocation - #: Instance variable. Internal reference to `wait_for_rebalance` - self.wait_for_rebalance = wait_for_rebalance - #: Instance variable. Internal reference to `wait_for_completion` - self.wfc = wait_for_completion - #: Instance variable. How many seconds to wait between checks for completion. - self.wait_interval = wait_interval - #: Instance variable. How long in seconds to `wait_for_completion` before returning with an - #: exception. A value of -1 means wait forever. - self.max_wait = max_wait - #: Instance variable. Internal reference to `number_of_shards` - self.number_of_shards = number_of_shards - self.wait_for_active_shards = wait_for_active_shards - self.shrink_node_name = None - - self.body = { - 'settings': { - 'index.number_of_shards' : number_of_shards, - 'index.number_of_replicas' : number_of_replicas, - } - } - - if extra_settings: - self._merge_extra_settings(extra_settings) - - if utils.get_version(self.client) >= (6, 1, 0): - self._merge_extra_settings({ - 'settings': { - 'index.routing.allocation.require._name': None, - 'index.blocks.write': None - }}) - - def _merge_extra_settings(self, extra_settings): - self.loggit.debug( - 'Adding extra_settings to shrink body: ' - '{0}'.format(extra_settings) - ) - # Pop these here, otherwise we could overwrite our default number of - # shards and replicas - if 'settings' in extra_settings: - settings = extra_settings.pop('settings') - try: - self.body['settings'].update(settings) - except Exception as err: - raise exceptions.ConfigurationError( - 'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format( - {'settings':settings}, err - ) - ) - if extra_settings: - try: # Apply any remaining keys, should there be any. - self.body.update(extra_settings) - except Exception as err: - raise exceptions.ConfigurationError( - 'Unable to apply extra settings "{0}" to shrink body. Exception: {1}'.format( - extra_settings, err - ) - ) - - def _data_node(self, node_id): - roles = utils.node_roles(self.client, node_id) - name = utils.node_id_to_name(self.client, node_id) - if not 'data' in roles: - self.loggit.info('Skipping node "{0}": non-data node'.format(name)) - return False - if 'master' in roles and not self.node_filters['permit_masters']: - self.loggit.info('Skipping node "{0}": master node'.format(name)) - return False - elif 'master' in roles and self.node_filters['permit_masters']: - self.loggit.warn( - 'Not skipping node "{0}" which is a master node (not recommended), but ' - 'permit_masters is True'.format(name) - ) - return True - else: # It does have `data` as a role. 
- return True - - def _exclude_node(self, name): - if 'exclude_nodes' in self.node_filters: - if name in self.node_filters['exclude_nodes']: - self.loggit.info('Excluding node "{0}" due to node_filters'.format(name)) - return True - return False - - def _shrink_target(self, name): - return '{0}{1}{2}'.format(self.shrink_prefix, name, self.shrink_suffix) - - def qualify_single_node(self): - """Qualify a single node as a shrink target""" - node_id = utils.name_to_node_id(self.client, self.shrink_node) - if node_id: - self.shrink_node_id = node_id - self.shrink_node_name = self.shrink_node - else: - raise exceptions.ConfigurationError( - 'Unable to find node named: "{0}"'.format(self.shrink_node)) - if self._exclude_node(self.shrink_node): - raise exceptions.ConfigurationError( - 'Node "{0}" listed for exclusion'.format(self.shrink_node)) - if not self._data_node(node_id): - raise exceptions.ActionError( - 'Node "{0}" is not usable as a shrink node'.format(self.shrink_node)) - self.shrink_node_avail = ( - self.client.nodes.stats()['nodes'][node_id]['fs']['total']['available_in_bytes'] - ) - - def most_available_node(self): - """ - Determine which data node name has the most available free space, and - meets the other node filters settings. - - :arg client: An :class:`elasticsearch.Elasticsearch` client object - """ - mvn_avail = 0 - # mvn_total = 0 - mvn_name = None - mvn_id = None - nodes = self.client.nodes.stats()['nodes'] - for node_id in nodes: - name = nodes[node_id]['name'] - if self._exclude_node(name): - self.loggit.debug('Node "{0}" excluded by node filters'.format(name)) - continue - if not self._data_node(node_id): - self.loggit.debug('Node "{0}" is not a data node'.format(name)) - continue - value = nodes[node_id]['fs']['total']['available_in_bytes'] - if value > mvn_avail: - mvn_name = name - mvn_id = node_id - mvn_avail = value - # mvn_total = nodes[node_id]['fs']['total']['total_in_bytes'] - self.shrink_node_name = mvn_name - self.shrink_node_id = mvn_id - self.shrink_node_avail = mvn_avail - # self.shrink_node_total = mvn_total - - def route_index(self, idx, allocation_type, key, value): - """Apply the indicated shard routing allocation""" - bkey = 'index.routing.allocation.{0}.{1}'.format(allocation_type, key) - routing = {bkey : value} - try: - self.client.indices.put_settings(index=idx, body=routing) - if self.wait_for_rebalance: - utils.wait_for_it( - self.client, 'allocation', wait_interval=self.wait_interval, - max_wait=self.max_wait - ) - else: - utils.wait_for_it( - self.client, 'relocate', index=idx, wait_interval=self.wait_interval, - max_wait=self.max_wait - ) - except Exception as err: - utils.report_failure(err) - - def __log_action(self, error_msg, dry_run=False): - if not dry_run: - raise exceptions.ActionError(error_msg) - else: - self.loggit.warn('DRY-RUN: {0}'.format(error_msg)) - - def _block_writes(self, idx): - block = {'index.blocks.write': True} - self.client.indices.put_settings(index=idx, body=block) - - def _unblock_writes(self, idx): - unblock = {'index.blocks.write': False} - self.client.indices.put_settings(index=idx, body=unblock) - - def _check_space(self, idx, dry_run=False): - # Disk watermark calculation is already baked into `available_in_bytes` - size = utils.index_size(self.client, idx, value='primaries') - padded = (size * 2) + (32 * 1024) - if padded < self.shrink_node_avail: - self.loggit.debug( - 'Sufficient space available for 2x the size of index "{0}". 
Required: {1}, ' - 'available: {2}'.format(idx, padded, self.shrink_node_avail) - ) - else: - error_msg = ( - 'Insufficient space available for 2x the size of index "{0}", shrinking will ' - 'exceed space available. Required: {1}, available: {2}'.format( - idx, padded, self.shrink_node_avail - ) - ) - self.__log_action(error_msg, dry_run) - - def _check_node(self): - if self.shrink_node != 'DETERMINISTIC': - if not self.shrink_node_name: - self.qualify_single_node() - else: - self.most_available_node() - # At this point, we should have the three shrink-node identifying - # instance variables: - # - self.shrink_node_name - # - self.shrink_node_id - # - self.shrink_node_avail - # # - self.shrink_node_total - only if needed in the future - - def _check_target_exists(self, idx, dry_run=False): - target = self._shrink_target(idx) - if self.client.indices.exists(target): - error_msg = 'Target index "{0}" already exists'.format(target) - self.__log_action(error_msg, dry_run) - - def _check_doc_count(self, idx, dry_run=False): - max_docs = 2147483519 - doc_count = self.client.indices.stats(idx)['indices'][idx]['primaries']['docs']['count'] - if doc_count > (max_docs * self.number_of_shards): - error_msg = ( - 'Too many documents ({0}) to fit in {1} shard(s). Maximum number of docs per ' - 'shard is {2}'.format(doc_count, self.number_of_shards, max_docs) - ) - self.__log_action(error_msg, dry_run) - - def _check_shard_count(self, idx, src_shards, dry_run=False): - if self.number_of_shards >= src_shards: - error_msg = ( - 'Target number of shards ({0}) must be less than current number of shards ({1}) ' - 'in index "{2}"'.format(self.number_of_shards, src_shards, idx) - ) - self.__log_action(error_msg, dry_run) - - def _check_shard_factor(self, idx, src_shards, dry_run=False): - # Find the list of factors of src_shards - factors = [x for x in range(1, src_shards+1) if src_shards % x == 0] - # Pop the last one, because it will be the value of src_shards - factors.pop() - if not self.number_of_shards in factors: - error_msg = ( - '"{0}" is not a valid factor of {1} shards. 
Valid values are ' - '{2}'.format(self.number_of_shards, src_shards, factors) - ) - self.__log_action(error_msg, dry_run) - - def _check_all_shards(self, idx): - shards = self.client.cluster.state(index=idx)['routing_table']['indices'][idx]['shards'] - found = [] - for shardnum in shards: - for shard_idx in range(0, len(shards[shardnum])): - if shards[shardnum][shard_idx]['node'] == self.shrink_node_id: - found.append( - {'shard': shardnum, 'primary': shards[shardnum][shard_idx]['primary']}) - if len(shards) != len(found): - self.loggit.debug( - 'Found these shards on node "{0}": {1}'.format(self.shrink_node_name, found)) - raise exceptions.ActionError( - 'Unable to shrink index "{0}" as not all shards were found on the designated ' - 'shrink node ({1}): {2}'.format(idx, self.shrink_node_name, found) - ) - - def pre_shrink_check(self, idx, dry_run=False): - """Do a shrink preflight check""" - self.loggit.debug('BEGIN PRE_SHRINK_CHECK') - self.loggit.debug('Check that target exists') - self._check_target_exists(idx, dry_run) - self.loggit.debug('Check doc count constraints') - self._check_doc_count(idx, dry_run) - self.loggit.debug('Check shard count') - src_shards = int(self.client.indices.get(idx)[idx]['settings']['index']['number_of_shards']) - self._check_shard_count(idx, src_shards, dry_run) - self.loggit.debug('Check shard factor') - self._check_shard_factor(idx, src_shards, dry_run) - self.loggit.debug('Check node availability') - self._check_node() - self.loggit.debug('Check available disk space') - self._check_space(idx, dry_run) - self.loggit.debug('FINISH PRE_SHRINK_CHECK') - - def do_copy_aliases(self, source_idx, target_idx): - """Copy the aliases to the shrunk index""" - alias_actions = [] - aliases = self.client.indices.get_alias(index=source_idx) - for alias in aliases[source_idx]['aliases']: - self.loggit.debug('alias: {0}'.format(alias)) - alias_actions.append( - {'remove': {'index': source_idx, 'alias': alias}}) - alias_actions.append( - {'add': {'index': target_idx, 'alias': alias}}) - if alias_actions: - self.loggit.info('Copy alias actions: {0}'.format(alias_actions)) - self.client.indices.update_aliases({'actions' : alias_actions}) - - def do_dry_run(self): - """ - Show what a regular run would do, but don't actually do it. - """ - self.index_list.filter_closed() - self.index_list.filter_by_shards(number_of_shards=self.number_of_shards) - self.index_list.empty_list_check() - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - for idx in lst: # Shrink can only be done one at a time... 
- target = self._shrink_target(idx) - self.pre_shrink_check(idx, dry_run=True) - self.loggit.info( - 'DRY-RUN: Moving shards to shrink node: "{0}"'.format( - self.shrink_node_name - ) - ) - self.loggit.info( - 'DRY-RUN: Shrinking index "{0}" to "{1}" with settings: {2}, ' - 'wait_for_active_shards={3}'.format( - idx, target, self.body, self.wait_for_active_shards - ) - ) - if self.post_allocation: - self.loggit.info( - 'DRY-RUN: Applying post-shrink allocation rule "{0}" to index ' - '"{1}"'.format( - 'index.routing.allocation.{0}.{1}:{2}'.format( - self.post_allocation['allocation_type'], - self.post_allocation['key'], self.post_allocation['value'] - ), target - ) - ) - if self.copy_aliases: - self.loggit.info( - 'DRY-RUN: Copy source index aliases "{0}"'.format( - self.client.indices.get_alias(idx) - ) - ) - #self.do_copy_aliases(idx, target) - if self.delete_after: - self.loggit.info('DRY-RUN: Deleting source index "{0}"'.format(idx)) - except Exception as err: - utils.report_failure(err) - - def do_action(self): - """Actually do the action""" - self.index_list.filter_closed() - self.index_list.filter_by_shards(number_of_shards=self.number_of_shards) - self.index_list.empty_list_check() - self.loggit.info( - 'Shrinking {0} selected indices: {1}'.format( - len(self.index_list.indices), self.index_list.indices - ) - ) - try: - index_lists = utils.chunk_index_list(self.index_list.indices) - for lst in index_lists: - for idx in lst: # Shrink can only be done one at a time... - target = self._shrink_target(idx) - self.loggit.info('Source index: {0} -- Target index: {1}'.format(idx, target)) - # Pre-check ensures disk space available for each pass of the loop - self.pre_shrink_check(idx) - # Route the index to the shrink node - self.loggit.info( - 'Moving shards to shrink node: "{0}"'.format(self.shrink_node_name)) - self.route_index(idx, 'require', '_name', self.shrink_node_name) - # Ensure a copy of each shard is present - self._check_all_shards(idx) - # Block writes on index - self._block_writes(idx) - # Do final health check - if not utils.health_check(self.client, status='green'): - raise exceptions.ActionError( - 'Unable to proceed with shrink action. 
Cluster health is not "green"') - # Do the shrink - self.loggit.info( - 'Shrinking index "{0}" to "{1}" with settings: {2}, wait_for_active_shards' - '={3}'.format(idx, target, self.body, self.wait_for_active_shards) - ) - try: - self.client.indices.shrink( - index=idx, target=target, body=self.body, - wait_for_active_shards=self.wait_for_active_shards - ) - # Wait for it to complete - if self.wfc: - self.loggit.debug( - 'Wait for shards to complete allocation for index: ' - '{0}'.format(target) - ) - if self.wait_for_rebalance: - utils.wait_for_it( - self.client, 'shrink', wait_interval=self.wait_interval, - max_wait=self.max_wait - ) - else: - utils.wait_for_it( - self.client, 'relocate', index=target, - wait_interval=self.wait_interval, max_wait=self.max_wait - ) - except Exception as err: - if self.client.indices.exists(index=target): - self.loggit.error( - 'Deleting target index "{0}" due to failure to complete ' - 'shrink'.format(target) - ) - self.client.indices.delete(index=target) - raise exceptions.ActionError( - 'Unable to shrink index "{0}" -- Error: {1}'.format(idx, err)) - self.loggit.info('Index "{0}" successfully shrunk to "{1}"'.format(idx, target)) - # Do post-shrink steps - # Unblock writes on index (just in case) - self._unblock_writes(idx) - ## Post-allocation, if enabled - if self.post_allocation: - self.loggit.info( - 'Applying post-shrink allocation rule "{0}" to index "{1}"'.format( - 'index.routing.allocation.{0}.{1}:{2}'.format( - self.post_allocation['allocation_type'], - self.post_allocation['key'], self.post_allocation['value'] - ), target - ) - ) - self.route_index( - target, self.post_allocation['allocation_type'], - self.post_allocation['key'], self.post_allocation['value'] - ) - ## Copy aliases, if flagged - if self.copy_aliases: - self.loggit.info('Copy source index aliases "{0}"'.format(idx)) - self.do_copy_aliases(idx, target) - ## Delete, if flagged - if self.delete_after: - self.loggit.info('Deleting source index "{0}"'.format(idx)) - self.client.indices.delete(index=idx) - else: # Let's unset the routing we applied here. 
- self.loggit.info('Unassigning routing for source index: "{0}"'.format(idx)) - self.route_index(idx, 'require', '_name', '') - - except Exception as err: - # Just in case it fails after attempting to meet this condition - self._unblock_writes(idx) - utils.report_failure(err) diff --git a/so-curator/files/curator.repo b/so-curator/files/curator.repo deleted file mode 100644 index 127c0a0..0000000 --- a/so-curator/files/curator.repo +++ /dev/null @@ -1,6 +0,0 @@ -[curator-5] -name=CentOS/RHEL 7 repository for Elasticsearch Curator 5.x packages -baseurl=https://packages.elastic.co/curator/5/centos/7 -gpgcheck=1 -gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch -enabled=1 diff --git a/so-curator/files/settings.py b/so-curator/files/settings.py deleted file mode 100644 index 8f692e3..0000000 --- a/so-curator/files/settings.py +++ /dev/null @@ -1,160 +0,0 @@ -"""Utilities/Helpers for defaults and schemas""" -from os import path -from six import string_types -from voluptuous import All, Any, Boolean, Coerce, Optional, Range, Required - -# Elasticsearch versions supported -def version_max(): - """Return the maximum Elasticsearch version Curator supports""" - return (8, 99, 99) -def version_min(): - """Return the minimum Elasticsearch version Curator supports""" - return (5, 0, 0) - -# Default Config file location -def config_file(): - """Return the default config file location""" - return path.join(path.expanduser('~'), '.curator', 'curator.yml') - -# Default filter patterns (regular expressions) -def regex_map(): - """Return a dictionary of pattern filter 'kind's with their associated regular expression""" - return { - 'timestring': r'^.*{0}.*$', - 'regex': r'{0}', - 'prefix': r'^{0}.*$', - 'suffix': r'^.*{0}$', - } - -def date_regex(): - """Return a dictionary/map of the strftime string characters and their string length""" - return { - 'Y' : '4', - 'G' : '4', - 'y' : '2', - 'm' : '2', - 'W' : '2', - 'V' : '2', - 'U' : '2', - 'd' : '2', - 'H' : '2', - 'M' : '2', - 'S' : '2', - 'j' : '3', - } - -# Actions - -def cluster_actions(): - """Return a list of supported cluster actions""" - return ['cluster_routing'] - -def index_actions(): - """Return a list of supported index actions""" - return [ - 'alias', - 'allocation', - 'close', - 'create_index', - 'delete_indices', - 'forcemerge', - 'freeze', - 'index_settings', - 'open', - 'reindex', - 'replicas', - 'rollover', - 'shrink', - 'snapshot', - 'unfreeze', - ] - -def snapshot_actions(): - """Return a list of supported snapshot actions""" - return ['delete_snapshots', 'restore'] - -def all_actions(): - """Return a sorted list of all supported actions: cluster, index, and snapshot""" - return sorted(cluster_actions() + index_actions() + snapshot_actions()) - -def index_filtertypes(): - """Return a list of supported index filter types""" - return [ - 'alias', - 'allocated', - 'age', - 'closed', - 'count', - 'empty', - 'forcemerged', - 'ilm', - 'kibana', - 'none', - 'opened', - 'pattern', - 'period', - 'space', - 'shards' - ] - -def snapshot_filtertypes(): - """Return a list of supported snapshot filter types""" - return ['age', 'count', 'none', 'pattern', 'period', 'state'] - -def all_filtertypes(): - """Return a sorted list of all supported filter types (both snapshot and index)""" - return sorted(list(set(index_filtertypes() + snapshot_filtertypes()))) - -def default_options(): - """Set default values for these options""" - return { - 'allow_ilm_indices': False, - 'continue_if_exception': False, - 'disable_action': False, - 
'ignore_empty_list': False, - 'timeout_override': None, - } - -def default_filters(): - """If no filters are set, add a 'none' filter""" - return {'filters': [{'filtertype': 'none'}]} - -def structural_filter_elements(): - """Barebones schemas for initial validation of filters""" - # pylint: disable=E1120 - return { - Optional('aliases'): Any(list, *string_types), - Optional('allocation_type'): Any(*string_types), - Optional('count'): Coerce(int), - Optional('date_from'): Any(None, *string_types), - Optional('date_from_format'): Any(None, *string_types), - Optional('date_to'): Any(None, *string_types), - Optional('date_to_format'): Any(None, *string_types), - Optional('direction'): Any(*string_types), - Optional('disk_space'): float, - Optional('epoch'): Any(Coerce(int), None), - Optional('exclude'): Any(None, bool, int, *string_types), - Optional('field'): Any(None, *string_types), - Optional('intersect'): Any(None, bool, int, *string_types), - Optional('key'): Any(*string_types), - Optional('kind'): Any(*string_types), - Optional('max_num_segments'): Coerce(int), - Optional('number_of_shards'): Coerce(int), - Optional('pattern'): Any(*string_types), - Optional('period_type'): Any(*string_types), - Optional('reverse'): Any(None, bool, int, *string_types), - Optional('range_from'): Coerce(int), - Optional('range_to'): Coerce(int), - Optional('shard_filter_behavior'): Any(*string_types), - Optional('source'): Any(*string_types), - Optional('state'): Any(*string_types), - Optional('stats_result'): Any(None, *string_types), - Optional('timestring'): Any(None, *string_types), - Optional('threshold_behavior'): Any(*string_types), - Optional('unit'): Any(*string_types), - Optional('unit_count'): Coerce(int), - Optional('unit_count_pattern'): Any(*string_types), - Optional('use_age'): Boolean(), - Optional('value'): Any(int, float, bool, *string_types), - Optional('week_starts_on'): Any(None, *string_types), - } diff --git a/so-mysql/Dockerfile b/so-mysql/Dockerfile deleted file mode 100644 index 9f349b7..0000000 --- a/so-mysql/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -FROM ghcr.io/security-onion-solutions/oraclelinux:9 - -LABEL maintainer "Security Onion Solutions, LLC" -LABEL description="MySQL Server running in Docker container for use with Security Onion" - -ARG GID=939 -ARG UID=939 -ARG USERNAME=socore - -ARG MYSQL_VERSION=8.0.32 - -# Install server -RUN yum install -y https://repo.mysql.com/mysql80-community-release-el9.rpm \ - && yum update -y \ - && yum install -y \ - mysql-community-server-${MYSQL_VERSION} \ - mysql-shell-${MYSQL_VERSION} \ - && yum clean all \ - && mkdir /docker-entrypoint-initdb.d - -# Create socore user. 
-RUN groupadd --gid ${GID} ${USERNAME} && \ - useradd --uid ${UID} --gid ${GID} \ - --home-dir /opt/so --no-create-home ${USERNAME} - -COPY docker-entrypoint.sh /entrypoint.sh -COPY healthcheck.sh /healthcheck.sh -RUN chmod +x /entrypoint.sh && chmod +x /healthcheck.sh -RUN chown -R 939:939 /var/lib/mysql && chown 939:939 -R /var/run/mysqld && chown -R 939:939 /var/lib/mysql-files -ENTRYPOINT ["/entrypoint.sh"] -HEALTHCHECK CMD /healthcheck.sh -EXPOSE 3306 33060 -CMD ["mysqld"] diff --git a/so-mysql/docker-entrypoint.sh b/so-mysql/docker-entrypoint.sh deleted file mode 100644 index 919b27a..0000000 --- a/so-mysql/docker-entrypoint.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/bash -# Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -set -e - -echo "[Entrypoint] MySQL Docker Image 5.7.24-1.1.8" -# Fetch value from server config -# We use mysqld --verbose --help instead of my_print_defaults because the -# latter only show values present in config files, and not server defaults -_get_config() { - local conf="$1"; shift - "$@" --verbose --help 2>/dev/null | grep "^$conf" | awk '$1 == "'"$conf"'" { print $2; exit }' -} - -# If command starts with an option, prepend mysqld -# This allows users to add command-line options without -# needing to specify the "mysqld" command -if [ "${1:0:1}" = '-' ]; then - set -- mysqld "$@" -fi - -if [ "$1" = 'mysqld' ]; then - # Test that the server can start. We redirect stdout to /dev/null so - # only the error messages are left. - result=0 - output=$("$@" --verbose --help 2>&1 > /dev/null) || result=$? - if [ ! "$result" = "0" ]; then - echo >&2 '[Entrypoint] ERROR: Unable to start MySQL. Please check your configuration.' - echo >&2 "[Entrypoint] $output" - exit 1 - fi - - # Get config - DATADIR="$(_get_config 'datadir' "$@")" - SOCKET="$(_get_config 'socket' "$@")" - echo "This from command line $MYSQL_ROOT_PASSWORD" - - if [ -n "$MYSQL_LOG_CONSOLE" ] || [ -n "" ]; then - # Don't touch bind-mounted config files - if ! cat /proc/1/mounts | grep "etc/my.cnf"; then - sed -i 's/^log-error=/#&/' /etc/my.cnf - fi - fi - - if [ ! -d "$DATADIR/mysql" ]; then - # If the password variable is a filename we use the contents of the file. We - # read this first to make sure that a proper error is generated for empty files. - if [ -f "$MYSQL_ROOT_PASSWORD" ]; then - MYSQL_ROOT_PASSWORD="$(cat $MYSQL_ROOT_PASSWORD)" - echo $MYSQL_ROOT_PASSWORD - if [ -z "$MYSQL_ROOT_PASSWORD" ]; then - echo >&2 '[Entrypoint] Empty MYSQL_ROOT_PASSWORD file specified.' - exit 1 - fi - fi - if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then - echo >&2 '[Entrypoint] No password option specified for new database.' - echo >&2 '[Entrypoint] A random onetime password will be generated.' 
- MYSQL_RANDOM_ROOT_PASSWORD=true - MYSQL_ONETIME_PASSWORD=true - fi - mkdir -p "$DATADIR" - #chown -R 939:939 "$DATADIR" - - echo '[Entrypoint] Initializing database' - "$@" --initialize-insecure - echo '[Entrypoint] Database initialized' - - "$@" --daemonize --skip-networking --socket="$SOCKET" - - # To avoid using password on commandline, put it in a temporary file. - # The file is only populated when and if the root password is set. - echo " Creating PASSFILE" - PASSFILE=$(mktemp -u /var/lib/mysql-files/XXXXXXXXXX) - install /dev/null -m0600 -osocore -gsocore "$PASSFILE" - # Define the client command used throughout the script - # "SET @@SESSION.SQL_LOG_BIN=0;" is required for products like group replication to work properly - echo "This is PASSFILE $PASSFILE" - mysql=( mysql --defaults-extra-file="$PASSFILE" --protocol=socket -uroot -hlocalhost --socket="$SOCKET" --init-command="SET @@SESSION.SQL_LOG_BIN=0;") - - if [ ! -z "" ]; - then - for i in {30..0}; do - if mysqladmin --socket="$SOCKET" ping &>/dev/null; then - break - fi - echo '[Entrypoint] Waiting for server...' - sleep 1 - done - if [ "$i" = 0 ]; then - echo >&2 '[Entrypoint] Timeout during MySQL init.' - exit 1 - fi - fi - - mysql_tzinfo_to_sql /usr/share/zoneinfo | "${mysql[@]}" mysql - - if [ ! -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then - MYSQL_ROOT_PASSWORD="$(pwmake 128)" - echo "[Entrypoint] GENERATED ROOT PASSWORD: $MYSQL_ROOT_PASSWORD" - fi - if [ -z "$MYSQL_ROOT_HOST" ]; then - ROOTCREATE="ALTER USER 'root'@'localhost' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}';" - else - ROOTCREATE="ALTER USER 'root'@'localhost' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}'; \ - CREATE USER 'root'@'${MYSQL_ROOT_HOST}' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}'; \ - GRANT ALL ON *.* TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ; \ - GRANT PROXY ON ''@'' TO 'root'@'${MYSQL_ROOT_HOST}' WITH GRANT OPTION ;" - fi - echo "This is what I want to enter for rootcreate: $ROOTCREATE" - "${mysql[@]}" <<-EOSQL - DELETE FROM mysql.user WHERE user NOT IN ('mysql.infoschema', 'mysql.session', 'mysql.sys', 'root') OR host NOT IN ('localhost'); - CREATE USER 'healthchecker'@'localhost' IDENTIFIED BY 'healthcheckpass'; - ${ROOTCREATE} - FLUSH PRIVILEGES ; - EOSQL - if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then - # Put the password into the temporary config file - cat >"$PASSFILE" < "$SQL" -ALTER USER 'root'@'${MYSQL_ROOT_HOST}' PASSWORD EXPIRE; -ALTER USER 'root'@'localhost' PASSWORD EXPIRE; -EOF - else - cat << EOF > "$SQL" -ALTER USER 'root'@'localhost' PASSWORD EXPIRE; -EOF - fi - set -- "$@" --init-file="$SQL" - unset SQL - fi - fi - - echo - echo '[Entrypoint] MySQL init process done. Ready for start up.' 
-		echo
-	fi
-
-	# Used by healthcheck to make sure it doesn't mistakenly report container
-	# healthy during startup
-	# Put the password into the temporary config file
-	touch /tmp/healthcheck.cnf
-	cat >"/tmp/healthcheck.cnf" <<EOF
[The remaining patch content at this point is garbled base85/binary data in this copy and is omitted.]
diff --git a/so-playbook/playbook/passenger-nginx-config-template.erb b/so-playbook/playbook/passenger-nginx-config-template.erb
deleted file mode 100644
index feb11fe..0000000
--- a/so-playbook/playbook/passenger-nginx-config-template.erb
+++ /dev/null
@@ -1,45 +0,0 @@
-<%= include_passenger_internal_template('global.erb') %>
-
-worker_processes 1;
-events {
-    worker_connections 4096;
-}
-
-http {
-    <%= include_passenger_internal_template('http.erb', 4) %>
-
-    default_type application/octet-stream;
-    types_hash_max_size 2048;
-    server_names_hash_bucket_size 64;
-    client_max_body_size 1024m;
-    access_log off;
-    keepalive_timeout 60;
-    underscores_in_headers on;
-    gzip on;
-    gzip_comp_level 3;
-    gzip_min_length 150;
-    gzip_proxied any;
-    gzip_types text/plain text/css text/json text/javascript
-        application/javascript application/x-javascript application/json
-        application/rss+xml application/vnd.ms-fontobject application/x-font-ttf
-        application/xml font/opentype image/svg+xml text/xml;
-
-    server {
-        server_name _;
-        listen 0.0.0.0:3000;
-        root '/usr/src/redmine/public';
-        passenger_app_env 'production';
-        passenger_spawn_method 'smart';
-        passenger_load_shell_envvars off;
-
-        location ~ ^/playbook(/.*|$) {
-            alias /usr/src/redmine/public$1;
-            passenger_base_uri /playbook;
-            passenger_app_root /usr/src/redmine;
-            passenger_document_root /usr/src/redmine/public;
-            passenger_enabled on;
-        }
-    }
-
-    passenger_pre_start http://0.0.0.0:3000/;
-}
diff --git a/so-soctopus/.gitignore b/so-soctopus/.gitignore
deleted file mode 100644
index 341e31e..0000000
--- a/so-soctopus/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-__pycache__
-.idea
\ No newline at end of file
diff --git a/so-soctopus/Dockerfile b/so-soctopus/Dockerfile
deleted file mode 100644
index b2035ce..0000000
--- a/so-soctopus/Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright Security Onion Solutions, LLC
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
- -FROM ghcr.io/security-onion-solutions/python:3-slim - -LABEL maintainer="Security Onion Solutions, LLC" -LABEL description="API for automating SOC-related functions" - -WORKDIR /SOCtopus - -RUN apt-get update && apt-get install -y --no-install-recommends git libmagic1 gcc python3-dev && rm -rf /var/lib/apt/lists/* && \ - git clone https://github.com/SigmaHQ/sigma.git && \ - mkdir -p /SOCtopus/templates && \ - mkdir -p /SOCtopus/playbook && \ - mkdir -p /var/log/SOCtopus - -COPY ./so-soctopus/requirements.txt /SOCtopus/ - -RUN pip install -r requirements.txt && \ - pip install gunicorn && \ - pip install bcrypt - -COPY ./so-soctopus /SOCtopus - -# Temp Patch for 2.4 - fixes Sigmac EQL issue -COPY ./elasticsearch.py /usr/local/lib/python3.9/site-packages/sigma/backends/elasticsearch.py - -ENTRYPOINT ["gunicorn", "-b", "0.0.0.0:7000", "wsgi:app", "--log-file", "/var/log/SOCtopus/soctopus.log"] \ No newline at end of file diff --git a/so-soctopus/elasticsearch.py b/so-soctopus/elasticsearch.py deleted file mode 100644 index 1d31b85..0000000 --- a/so-soctopus/elasticsearch.py +++ /dev/null @@ -1,1879 +0,0 @@ -# Output backends for sigmac -# Copyright 2016-2018 Thomas Patzke, Florian Roth (Nextron Systems), Devin Ferguson, Julien Bachmann - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. - -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see . 
- -import json -import re -from fnmatch import fnmatch -import sys -import os -from random import randrange -from distutils.util import strtobool -from uuid import uuid4 - -import sigma -import yaml -from sigma.parser.modifiers.type import SigmaRegularExpressionModifier, SigmaTypeModifier -from sigma.parser.condition import ConditionOR, ConditionAND, NodeSubexpression, SigmaAggregationParser, SigmaConditionParser, SigmaConditionTokenizer - -from sigma.config.mapping import ConditionalFieldMapping -from .base import BaseBackend, SingleTextQueryBackend -from .mixins import RulenameCommentMixin, MultiRuleOutputMixin -from .exceptions import NotSupportedError - -class DeepFieldMappingMixin(object): - def fieldNameMapping(self, fieldname, value): - if isinstance(fieldname, str): - get_config = self.sigmaconfig.fieldmappings.get(fieldname) - if not get_config and '|' in fieldname: - fieldname = fieldname.split('|', 1)[0] - get_config = self.sigmaconfig.fieldmappings.get(fieldname) - if isinstance(get_config, ConditionalFieldMapping): - condition = self.sigmaconfig.fieldmappings.get(fieldname).conditions - for key, item in self.logsource.items(): - if condition.get(key) and condition.get(key, {}).get(item): - new_fieldname = condition.get(key, {}).get(item) - if any(new_fieldname): - return super().fieldNameMapping(new_fieldname[0], value) - return super().fieldNameMapping(fieldname, value) - - def generate(self, sigmaparser): - self.logsource = sigmaparser.parsedyaml.get("logsource", {}) - return super().generate(sigmaparser) - -class ElasticsearchWildcardHandlingMixin(object): - """ - Determine field mapping to keyword subfields depending on existence of wildcards in search values. Further, - provide configurability with backend parameters. - """ - options = SingleTextQueryBackend.options + ( - ("keyword_field", "keyword", "Keyword sub-field name (default is: '.keyword'). Set blank value if all keyword fields are the base(top-level) field. Additionally see 'keyword_base_fields' for more granular control of the base & subfield situation.", None), - ("analyzed_sub_field_name", "", "Analyzed sub-field name. By default analyzed field is the base field. Therefore, use this option to make the analyzed field a subfield. An example value would be '.text' ", None), - ("analyzed_sub_fields", None, "Fields that have an analyzed sub-field.", None), - ("keyword_base_fields", None, "Fields that the keyword is base (top-level) field. By default analyzed field is the base field. So use this option to change that logic. Valid options are: list of fields, single field. Also, wildcards * and ? allowed.", None), - ("keyword_whitelist", None, "Fields to always set as keyword. Bypasses case insensitive options. Valid options are: list of fields, single field. Also, wildcards * and ? allowed.", None), - ("keyword_blacklist", None, "Fields to never set as keyword (ie: always set as analyzed field). Bypasses case insensitive options. Valid options are: list of fields, single field. Also, wildcards * and ? allowed.", None), - ("case_insensitive_whitelist", None, "Fields to make the values case insensitive regex. Automatically sets the field as a keyword. Valid options are: list of fields, single field. Also, wildcards * and ? allowed.", None), - ("case_insensitive_blacklist", None, "Fields to exclude from being made into case insensitive regex. Valid options are: list of fields, single field. Also, wildcards * and ? 
allowed.", None), - ("wildcard_use_keyword", "true", "Use analyzed field or wildcard field if the query uses a wildcard value (ie: '*mall_wear.exe'). Set this to 'False' to use analyzed field or wildcard field. Valid options are: true/false", None), - ("hash_normalize", None, "Normalize hash fields to lowercase, uppercase or both. If this option is not used the field value stays untouched. Valid options are: lower/upper/both (default: both)", None), - ("not_bound_keyword", "\\*.keyword", "field name to use for keyword list search (default is: '\\*.keyword')", None), - ) - reContainsWildcard = re.compile("(?:(?\\*", value ) - # Make upper/lower - value = re.sub( r"[A-Za-z]", lambda x: "[" + x.group( 0 ).upper() + x.group( 0 ).lower() + "]", value ) - # Turn `.` into wildcard, only if odd number of '\'(because this would mean already escaped) - value = re.sub( r"(((?\.", value ) - # Turn `*` into wildcard, only if odd number of '\'(because this would mean already escaped) - value = re.sub( r"(((?.*", value ) - # Escape additional values that are treated as specific "operators" within Elastic. (ie: @, ?, &, <, >, and ~) - # reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/regexp-syntax.html#regexp-optional-operators - value = re.sub( r"(((?])", "\g<1>\\\\\g<4>", value ) - # Validate regex - try: - re.compile(value) - return {'is_regex': True, 'value': value} - # Regex failed - except re.error: - raise TypeError( "Regular expression validation error for: '%s')" %str(value) ) - else: - return { 'is_regex': False, 'value': value } - -class ElasticsearchQuerystringBackend(DeepFieldMappingMixin, ElasticsearchWildcardHandlingMixin, SingleTextQueryBackend): - """Converts Sigma rule into Elasticsearch query string. Only searches, no aggregations.""" - identifier = "es-qs" - active = True - - reEscape = re.compile("([\s+\\-=!(){}\\[\\]^\"~:/]|(? expression dict, %s represents value - nullExpression = "%s == null" - notNullExpression = "%s != null" - mapExpression = "%s : %s" - mapListsSpecialHandling = False - mapListValueExpression = "%s : %s" - reEscape = re.compile(r"([\"]|(? 0: - ret += " until [ " + " ] [ ".join(excludeQueries) + " ]" - return ret - - raise NotImplementedError("Aggregation %s is not implemented for this backend" % agg.aggfunc_notrans) - - def generateEventCategory(self): - if len(self.categories) == 0: - return "any where " - elif len(self.categories) == 1: - return "%s where " % self.categories.pop() - # XXX raise NotImplementedError? 
>1 category is probably due to unmapped fields - return "any where " - - def generateBefore(self, parsed): - before = "" - - if self.sequence: - before += "sequence " - if self.maxspan != None: - before += "with maxspan=%s " % self.maxspan - before += "[ " - - before += self.generateEventCategory() - - return before - - def fieldNameMapping(self, fieldname, value): - if fieldname.count("-") > 0 or fieldname.count(" ") > 0 or fieldname[0].isdigit(): - return "`%s`" % fieldname - return fieldname - -class ElasticsearchDSLBackend(DeepFieldMappingMixin, RulenameCommentMixin, ElasticsearchWildcardHandlingMixin, BaseBackend): - """Converts Sigma rule into Elasticsearch DSL query""" - identifier = 'es-dsl' - active = True - options = RulenameCommentMixin.options + ElasticsearchWildcardHandlingMixin.options + ( - ("es", "http://localhost:9200", "Host and port of Elasticsearch instance", None), - ("output", "import", "Output format: import = JSON search request, curl = Shell script that do the search queries via curl", "output_type"), - ("set_size", "0", "value for the size of returned datasets.", None) - ) - interval = None - title = None - reEscape = re.compile( "([\s+\\-=!(){}\\[\\]^\"~:/]|(? 0: - self.queries[-1]['size'] = self.set_size - - # set _source from YAML-fields - columns = list() - mapped =None - try: - for field in sigmaparser.parsedyaml["fields"]: - mapped = sigmaparser.config.get_fieldmapping(field).resolve_fieldname(field, sigmaparser) - if type(mapped) == str: - columns.append(mapped) - elif type(mapped) == list: - columns.extend(mapped) - else: - raise TypeError("Field mapping must return string or list") - - fields = ",".join(str(x) for x in columns) - self.queries[-1]['_source'] = columns - except KeyError: # no 'fields' attribute - mapped = None - pass - - self.generateAfter(parsed) - - def generateQuery(self, parsed): - self.queries[-1]['query']['constant_score']['filter'] = self.generateNode(parsed.parsedSearch) - if parsed.parsedAgg: - self.generateAggregation(parsed.parsedAgg) - - def generateANDNode(self, node): - andNode = {'bool': {'must': []}} - for val in node: - andNode['bool']['must'].append(self.generateNode(val)) - return andNode - - def generateORNode(self, node): - orNode = {'bool': {'should': []}} - for val in node: - orNode['bool']['should'].append(self.generateNode(val)) - return orNode - - def generateNOTNode(self, node): - notNode = {'bool': {'must_not': []}} - for val in node: - notNode['bool']['must_not'].append(self.generateNode(val)) - return notNode - - def generateSubexpressionNode(self, node): - return self.generateNode(node.items) - - def generateListNode(self, node): - raise NotImplementedError("%s : (%s) Node type not implemented for this backend"%(self.title, 'generateListNode')) - - def cleanValue(self, value): - """ - Remove Sigma quoting from value. 
Currently, this appears only in one case: \\\\* - """ - return value.replace("\\\\*", "\\*") - - def escapeSlashes(self, value): - return value.replace("\\", "\\\\") - - def generateMapItemNode(self, node): - key, value = node - if type(value) is list: - res = {'bool': {'should': []}} - for v in value: - key_mapped = self.fieldNameMapping(key, v) - if isinstance(v, NodeSubexpression): - condition = v.items - res['bool']['should'].append(self.generateNode((key_mapped, condition.items))) - else: - if self.matchKeyword: # searches against keyword fields are wildcard searches, phrases otherwise - if self.CaseInSensitiveField: - queryType = 'regexp' - make_ci = self.makeCaseInSensitiveValue(self.reEscape.sub("\\\\\g<1>", str(v))) - value_cleaned = make_ci.get('value') - if not make_ci.get( 'is_regex' ): # Determine if still should be a regex - queryType = 'wildcard' - value_cleaned = self.escapeSlashes( self.cleanValue( str( v ) ) ) - else: - queryType = 'wildcard' - value_cleaned = self.escapeSlashes(self.cleanValue(str(v))) - else: - if self.containsWildcard(str(v)): - queryType = 'wildcard' - value_cleaned = self.escapeSlashes(self.cleanValue(str(v))) - else: - queryType = 'match_phrase' - value_cleaned = self.cleanValue(str(v)) - res['bool']['should'].append({queryType: {key_mapped: value_cleaned}}) - return res - elif value is None: - key_mapped = self.fieldNameMapping(key, value) - return { "bool": { "must_not": { "exists": { "field": key_mapped } } } } - elif type(value) in (str, int): - key_mapped = self.fieldNameMapping(key, value) - if self.matchKeyword: # searches against keyword fields are wildcard searches, phrases otherwise - if self.CaseInSensitiveField: - queryType = 'regexp' - make_ci = self.makeCaseInSensitiveValue( self.reEscape.sub( "\\\\\g<1>", str( value ) ) ) - value_cleaned = make_ci.get( 'value' ) - if not make_ci.get( 'is_regex' ): # Determine if still should be a regex - queryType = 'wildcard' - value_cleaned = self.escapeSlashes( self.cleanValue( str( value ) ) ) - else: - queryType = 'wildcard' - value_cleaned = self.escapeSlashes(self.cleanValue(str(value))) - else: - if self.containsWildcard(str(value)): - queryType = 'wildcard' - value_cleaned = self.escapeSlashes(self.cleanValue(str(value))) - else: - queryType = 'match_phrase' - value_cleaned = self.cleanValue(str(value)) - return {queryType: {key_mapped: value_cleaned}} - elif isinstance(value, SigmaRegularExpressionModifier): - key_mapped = self.fieldNameMapping(key, value) - return { 'regexp': { key_mapped: str(value) } } - else: - raise TypeError("Map values must be strings, numbers, lists, null or regular expression, not " + str(type(value))) - - def generateValueNode(self, node): - return {'multi_match': {'query': node, 'fields': [], 'type': 'phrase'}} - - def generateNULLValueNode(self, node): - return {'bool': {'must_not': {'exists': {'field': node.item}}}} - - def generateNotNULLValueNode(self, node): - return {'exists': {'field': node.item}} - - def generateAggregation(self, agg): - """ - Generates an Elasticsearch nested aggregation given a SigmaAggregationParser object - - Two conditions are handled here: - a) "count() by MyGroupedField > X" - b) "count(MyDistinctFieldName) by MyGroupedField > X' - - The case (b) is translated to a the following equivalent SQL query - - ``` - SELECT MyDistinctFieldName, COUNT(DISTINCT MyDistinctFieldName) FROM Table - GROUP BY MyGroupedField HAVING COUNT(DISTINCT MyDistinctFieldName) > 1 - ``` - - The resulting aggregation is set on 'self.queries[-1]["aggs"]' as a 
Python dict - - :param agg: Input SigmaAggregationParser object that defines a condition - :return: None - """ - if agg: - if agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_COUNT: - if agg.groupfield is not None: - # If the aggregation is 'count(MyDistinctFieldName) by MyGroupedField > XYZ' - if agg.aggfield is not None: - count_agg_group_name = "{}_count".format(agg.groupfield) - count_distinct_agg_name = "{}_distinct".format(agg.aggfield) - script_limit = "params.count {} {}".format(agg.cond_op, agg.condition) - self.queries[-1]['aggs'] = { - count_agg_group_name: { - "terms": { - "field": "{}".format(agg.groupfield) - }, - "aggs": { - count_distinct_agg_name: { - "cardinality": { - "field": "{}".format(agg.aggfield) - } - }, - "limit": { - "bucket_selector": { - "buckets_path": { - "count": count_distinct_agg_name - }, - "script": script_limit - } - } - } - } - } - else: # if the condition is count() by MyGroupedField > XYZ - group_aggname = "{}_count".format(agg.groupfield) - count_agg_name = "single_{}_count".format(agg.groupfield) - self.queries[-1]['aggs'] = { - group_aggname: { - 'terms': { - 'field': '%s' % (agg.groupfield) - }, - 'aggs': { - count_agg_name: { - 'value_count': { - 'field': '%s' % agg.groupfield - } - }, - 'limit': { - 'bucket_selector': { - 'buckets_path': { - 'count': count_agg_name - }, - 'script': 'params.count %s %s' % (agg.cond_op, agg.condition) - } - } - } - } - } - else: - funcname = "" - for name, idx in agg.aggfuncmap.items(): - if idx == agg.aggfunc: - funcname = name - break - raise NotImplementedError("%s : The '%s' aggregation operator is not yet implemented for this backend" % (self.title, funcname)) - - def generateBefore(self, parsed): - self.queries.append({'query': {'constant_score': {'filter': {}}}}) - - def generateAfter(self, parsed): - dateField = 'date' - if self.sigmaconfig.config and 'dateField' in self.sigmaconfig.config: - dateField = self.sigmaconfig.config['dateField'] - if self.interval: - if 'bool' not in self.queries[-1]['query']['constant_score']['filter']: - saved_simple_query = self.queries[-1]['query']['constant_score']['filter'] - self.queries[-1]['query']['constant_score']['filter'] = {'bool': {'must': []}} - if len(saved_simple_query.keys()) > 0: - self.queries[-1]['query']['constant_score']['filter']['bool']['must'].append(saved_simple_query) - if 'must' not in self.queries[-1]['query']['constant_score']['filter']['bool']: - self.queries[-1]['query']['constant_score']['filter']['bool']['must'] = [] - - self.queries[-1]['query']['constant_score']['filter']['bool']['must'].append({'range': {dateField: {'gte': 'now-%s'%self.interval}}}) - - def finalize(self): - """ - Is called after the last file was processed with generate(). The right place if this backend is not intended to - look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards. 
- """ - index = '' - if self.indices is not None and len(self.indices) == 1: - index = '%s/'%self.indices[0] - - if self.output_type == 'curl': - for query in self.queries: - return "\curl -XGET '%s/%s_search?pretty' -H 'Content-Type: application/json' -d'%s'" % (self.es, index, json.dumps(query, indent=2)) - else: - if len(self.queries) == 1: - return json.dumps(self.queries[0], indent=2) - else: - return json.dumps(self.queries, indent=2) - -class KibanaBackend(ElasticsearchQuerystringBackend, MultiRuleOutputMixin): - """Converts Sigma rule into Kibana JSON Configuration files (searches only).""" - identifier = "kibana" - active = True - options = ElasticsearchQuerystringBackend.options + ( - ("output", "import", "Output format: import = JSON file manually imported in Kibana, curl = Shell script that imports queries in Kibana via curl (jq is additionally required)", "output_type"), - ("es", "localhost:9200", "Host and port of Elasticsearch instance", None), - ("index", ".kibana", "Kibana index", None), - ("prefix", "Sigma: ", "Title prefix of Sigma queries", None), - ) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.kibanaconf = list() - self.indexsearch = set() - - def generate(self, sigmaparser): - description = sigmaparser.parsedyaml.setdefault("description", "") - - columns = list() - try: - for field in sigmaparser.parsedyaml["fields"]: - mapped = sigmaparser.config.get_fieldmapping(field).resolve_fieldname(field, sigmaparser) - if type(mapped) == str: - columns.append(mapped) - elif type(mapped) == list: - columns.extend(mapped) - else: - raise TypeError("Field mapping must return string or list") - except KeyError: # no 'fields' attribute - pass - - indices = sigmaparser.get_logsource().index - if len(indices) == 0: # fallback if no index is given - indices = ["*"] - - for parsed in sigmaparser.condparsed: - result = self.generateNode(parsed.parsedSearch) - - for index in indices: - rulename = self.getRuleName(sigmaparser) - if len(indices) > 1: # add index names if rule must be replicated because of ambigiuous index patterns - raise NotSupportedError("Multiple target indices are not supported by Kibana") - else: - title = self.prefix + sigmaparser.parsedyaml["title"] - - self.indexsearch.add( - "export {indexvar}=$(curl -s '{es}/{index}/_search?q=index-pattern.title:{indexpattern}' | jq -r '.hits.hits[0]._id | ltrimstr(\"index-pattern:\")')".format( - es=self.es, - index=self.index, - indexpattern=index.replace("*", "\\*"), - indexvar=self.index_variable_name(index) - ) - ) - self.kibanaconf.append({ - "_id": rulename, - "_type": "search", - "_source": { - "title": title, - "description": description, - "hits": 0, - "columns": columns, - "sort": ["@timestamp", "desc"], - "version": 1, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "index": index, - "filter": [], - "highlight": { - "pre_tags": ["@kibana-highlighted-field@"], - "post_tags": ["@/kibana-highlighted-field@"], - "fields": { "*":{} }, - "require_field_match": False, - "fragment_size": 2147483647 - }, - "query": { - "query_string": { - "query": result, - "analyze_wildcard": True - } - } - } - } - } - }) - - def finalize(self): - if self.output_type == "import": # output format that can be imported via Kibana UI - for item in self.kibanaconf: # JSONize kibanaSavedObjectMeta.searchSourceJSON - item['_source']['kibanaSavedObjectMeta']['searchSourceJSON'] = json.dumps(item['_source']['kibanaSavedObjectMeta']['searchSourceJSON']) - if self.kibanaconf: - return 
json.dumps(self.kibanaconf, indent=2) - elif self.output_type == "curl": - for item in self.indexsearch: - return item - for item in self.kibanaconf: - item['_source']['kibanaSavedObjectMeta']['searchSourceJSON']['index'] = "$" + self.index_variable_name(item['_source']['kibanaSavedObjectMeta']['searchSourceJSON']['index']) # replace index pattern with reference to variable that will contain Kibana index UUID at script runtime - item['_source']['kibanaSavedObjectMeta']['searchSourceJSON'] = json.dumps(item['_source']['kibanaSavedObjectMeta']['searchSourceJSON']) # Convert it to JSON string as expected by Kibana - item['_source']['kibanaSavedObjectMeta']['searchSourceJSON'] = item['_source']['kibanaSavedObjectMeta']['searchSourceJSON'].replace("\\", "\\\\") # Add further escaping for escaped quotes for shell - return "curl -s -XPUT -H 'Content-Type: application/json' --data-binary @- '{es}/{index}/doc/{doc_id}' <","Index name used to add the alerts", None), #by default it creates a new index every day - ("type", "_doc","Index Type used to add the alerts", None) - - ) - watcher_urls = { - "watcher": "_watcher", - "xpack": "_xpack/watcher", - } - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.watcher_alert = dict() - self.url_prefix = self.watcher_urls[self.watcher_url] - - def generate(self, sigmaparser): - # get the details if this alert occurs - title = sigmaparser.parsedyaml.setdefault("title", "") - description = sigmaparser.parsedyaml.setdefault("description", "") - false_positives = sigmaparser.parsedyaml.setdefault("falsepositives", "") - level = sigmaparser.parsedyaml.setdefault("level", "") - tags = sigmaparser.parsedyaml.setdefault("tags", "") - # Get time frame if exists - interval = sigmaparser.parsedyaml["detection"].setdefault("timeframe", "30m") - dateField = self.sigmaconfig.config.get("dateField", "timestamp") - - # creating condition - indices = sigmaparser.get_logsource().index - # How many results to be returned. Usually 0 but for index action we need it. - size = 0 - - for condition in sigmaparser.condparsed: - rulename = self.getRuleName(sigmaparser) - result = self.generateNode(condition.parsedSearch) - agg = {} - alert_value_location = "" - try: - condition_value = int(condition.parsedAgg.condition) - min_doc_count = {} - if condition.parsedAgg.cond_op == ">": - alert_condition = { "gt": condition_value } - min_doc_count = { "min_doc_count": condition_value + 1 } - order = "desc" - elif condition.parsedAgg.cond_op == ">=": - alert_condition = { "gte": condition_value } - min_doc_count = { "min_doc_count": condition_value } - order = "desc" - elif condition.parsedAgg.cond_op == "<": - alert_condition = { "lt": condition_value } - order = "asc" - elif condition.parsedAgg.cond_op == "<=": - alert_condition = { "lte": condition_value } - order = "asc" - else: - alert_condition = {"not_eq": 0} - - agg_iter = list() - if condition.parsedAgg.aggfield is not None: # e.g. ... count(aggfield) ... - agg = { - "aggs": { - "agg": { - "terms": { - "field": condition.parsedAgg.aggfield, - "size": 10, - "order": { - "_count": order - }, - **min_doc_count - }, - **agg - } - } - } - alert_value_location = "agg.buckets.0." - agg_iter.append("agg.buckets") - if condition.parsedAgg.groupfield is not None: # e.g. ... by groupfield ... - agg = { - "aggs": { - "by": { - "terms": { - "field": condition.parsedAgg.groupfield, - "size": 10, - "order": { - "_count": order - }, - **min_doc_count - }, - **agg - } - } - } - alert_value_location = "by.buckets.0." 
+ alert_value_location - agg_iter.append("by.buckets") - except KeyError: - alert_condition = {"not_eq": 0} - except AttributeError: - alert_condition = {"not_eq": 0} - - if agg != {}: - alert_value_location = "ctx.payload.aggregations." + alert_value_location + "doc_count" - agg_iter[0] = "aggregations." + agg_iter[0] - action_body = "Hits:\n" - action_body += "\n".join([ - ("{{#%s}}\n" + (2 * i * "-") + " {{key}} {{doc_count}}\n") % (agg_item) for i, agg_item in enumerate(agg_iter) - ]) - action_body += "\n".join([ - "{{/%s}}\n" % agg_item for agg_item in reversed(agg_iter) - ]) - else: - alert_value_location = "ctx.payload.hits.total" - action_body = "Hits:\n{{#ctx.payload.hits.hits}}" - try: # extract fields if these are given in rule - fields = sigmaparser.parsedyaml['fields'] - max_field_len = max([len(field) for field in fields]) - action_body += "Hit on {{_source.@timestamp}}:\n" + "\n".join([ - ("%" + str(max_field_len) + "s = {{_source.%s}}") % (field, field) for field in fields - ]) + (80 * "=") + "\n" - except KeyError: # no fields given, extract all hits - action_body += "{{_source}}\n" - action_body += (80 * "=") + "\n" - action_body += "{{/ctx.payload.hits.hits}}" - - # Building the action - action_subject = "Sigma Rule '%s'" % title - try: - eaction={} #email action - waction={} #webhook action - iaction={} #index action - action={} - alert_methods = self.alert_methods.split(',') - if 'email' in alert_methods: - # mail notification if mail address is given - email = self.mail - mail_profile = self.mail_profile - mail_from = self.mail_from - action_throttle_period = self.action_throttle_period - eaction = { - "send_email": { - "throttle_period": action_throttle_period, - "email": { - "profile": mail_profile, - "from": mail_from, - "to": email, - "subject": action_subject, - "body": action_body, - "attachments": { - "data.json": { - "data": { - "format": "json" - } - } - } - } - } - } - if 'webhook' in alert_methods: # WebHook Action. Sending metadata to a webservice. Added timestamp to metadata - http_scheme = self.http_scheme - http_host = self.http_host - http_port = self.http_port - http_uri_path = self.http_uri_path - http_method = self.http_method - http_phost = self.http_phost - http_pport = self.http_pport - http_user = self.http_user - http_pass = self.http_pass - waction = { - "httppost":{ - "transform":{ - "script": "ctx.metadata.timestamp=ctx.trigger.scheduled_time;" - }, - "webhook":{ - "scheme" : http_scheme, - "host" : http_host, - "port" : int(http_port), - "method" : http_method, - "path" : http_uri_path, - "params" : {}, - "headers" : {"Content-Type" : "application/json"}, - "body" : "{{#toJson}}ctx.metadata{{/toJson}}" - } - } - } - if (http_user) and (http_pass): - auth={ - "basic":{ - "username":http_user, - "password":http_pass - } - } - waction['httppost']['webhook']['auth']={} - waction['httppost']['webhook']['auth']=auth - - if (http_phost) and (http_pport): #As defined in documentation - waction['httppost']['webhook']['proxy']={} - waction['httppost']['webhook']['proxy']['host']=http_phost - waction['httppost']['webhook']['proxy']['port']=http_pport - - if 'index' in alert_methods: #Index Action. 
Adding metadata to actual events and send them in another index - index = self.index - dtype = self.type - size=1000 #I presume it will not be more than 1000 events detected - iaction = { - "elastic":{ - "transform":{ #adding title, description, tags on the event - "script": "ctx.payload.transform = [];for (int j=0;j 0: - index = index[0] - #Init a rule number cpt in case there are several elastalert rules generated from one Sigma rule - rule_number = 0 - for parsed in sigmaparser.condparsed: - #Static data - rule_object = { - "name": rulename, - "description": description, - "index": index, - "priority": self.convertLevel(level), - "realert": self.generateTimeframe(self.realert_time), - #"exponential_realert": self.generateTimeframe(self.expo_realert_time) - } - - rule_object['filter'] = self.generateQuery(parsed) - self.queries = [] - - #Handle aggregation - if parsed.parsedAgg: - if parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_COUNT or parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_MIN or parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_MAX or parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_AVG or parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_SUM: - if parsed.parsedAgg.groupfield is not None: - rule_object['query_key'] = self.fieldNameMapping(parsed.parsedAgg.groupfield, '*') - rule_object['type'] = "metric_aggregation" - rule_object['buffer_time'] = interval - rule_object['doc_type'] = "doc" - - if parsed.parsedAgg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_COUNT: - rule_object['metric_agg_type'] = "cardinality" - else: - rule_object['metric_agg_type'] = parsed.parsedAgg.aggfunc_notrans - - if parsed.parsedAgg.aggfield: - rule_object['metric_agg_key'] = self.fieldNameMapping(parsed.parsedAgg.aggfield, '*') - else: - rule_object['metric_agg_key'] = "_id" - - condition_value = int(parsed.parsedAgg.condition) - if parsed.parsedAgg.cond_op == ">": - rule_object['max_threshold'] = condition_value - elif parsed.parsedAgg.cond_op == ">=": - rule_object['max_threshold'] = condition_value - 1 - elif parsed.parsedAgg.cond_op == "<": - rule_object['min_threshold'] = condition_value - elif parsed.parsedAgg.cond_op == "<=": - rule_object['min_threshold'] = condition_value - 1 - else: - rule_object['max_threshold'] = condition_value - 1 - rule_object['min_threshold'] = condition_value + 1 - else: - rule_object['type'] = "any" - - #Handle alert action - rule_object['alert'] = [] - alert_methods = self.alert_methods.split(',') - if 'email' in alert_methods: - rule_object['alert'].append('email') - rule_object['email'] = [] - for address in self.emails.split(','): - rule_object['email'].append(address) - if self.smtp_host: - rule_object['smtp_host'] = self.smtp_host - if self.from_addr: - rule_object['from_addr'] = self.from_addr - if self.smtp_auth_file: - rule_object['smtp_auth_file'] = self.smtp_auth_file - if 'http_post' in alert_methods: - if self.http_post_url is None: - print('Warning: the Elastalert HTTP POST method is selected but no URL has been provided.', file=sys.stderr) - else: - rule_object['http_post_url'] = self.http_post_url - - rule_object['alert'].append('post') - if self.http_post_include_rule_metadata: - rule_object['http_post_static_payload'] = { - 'sigma_rule_metadata': { - 'title': title, - 'description': description, - 'level': level, - 'tags': rule_tag - } - } - #If alert is not 
define put debug as default - if len(rule_object['alert']) == 0: - rule_object['alert'].append('debug') - - #Increment rule number - rule_number += 1 - self.elastalert_alerts[rule_object['name']] = rule_object - #Clear fields - self.fields = [] - return str(yaml.dump(rule_object, default_flow_style=False, width=10000)) - - def generateNode(self, node): - #Save fields for adding them in query_key - #if type(node) == sigma.parser.NodeSubexpression: - # for k,v in node.items.items: - # self.fields.append(k) - return super().generateNode(node) - - def generateTimeframe(self, timeframe): - time_unit = timeframe[-1:] - duration = timeframe[:-1] - timeframe_object = {} - if time_unit == "s": - timeframe_object['seconds'] = int(duration) - elif time_unit == "m": - timeframe_object['minutes'] = int(duration) - elif time_unit == "h": - timeframe_object['hours'] = int(duration) - elif time_unit == "d": - timeframe_object['days'] = int(duration) - else: - timeframe_object['months'] = int(duration) - return timeframe_object - - def generateAggregation(self, agg): - if agg: - if agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_COUNT or \ - agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_MIN or \ - agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_MAX or \ - agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_AVG or \ - agg.aggfunc == sigma.parser.condition.SigmaAggregationParser.AGGFUNC_SUM: - return "" - else: - for name, idx in agg.aggfuncmap.items(): - if idx == agg.aggfunc: - funcname = name - break - raise NotImplementedError("%s : The '%s' aggregation operator is not yet implemented for this backend" % ( self.title, funcname)) - - def convertLevel(self, level): - return { - 'critical': 1, - 'high': 2, - 'medium': 3, - 'low': 4 - }.get(level, 2) - - def finalize(self): - pass - # result = "" - # for rulename, rule in self.elastalert_alerts.items(): - # result += yaml.dump(rule, default_flow_style=False, width=10000) - # result += '\n' - # return result - -class ElastalertBackendDsl(ElastalertBackend, ElasticsearchDSLBackend): - """Converts Sigma rule into ElastAlert DSL query""" - identifier = 'elastalert-dsl' - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def generateQuery(self, parsed): - #Generate ES DSL Query - super().generateBefore(parsed) - super().generateQuery(parsed) - super().generateAfter(parsed) - return self.queries - -class ElastalertBackendQs(ElastalertBackend, ElasticsearchQuerystringBackend): - """Converts Sigma rule into ElastAlert QS query""" - identifier = 'elastalert' - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def generateQuery(self, parsed): - #Generate ES QS Query - return [{ 'query' : { 'query_string' : { 'query' : super().generateQuery(parsed) } } }] - -class ElasticSearchRuleBackend(object): - """Elasticsearch detection rule backend""" - active = True - uuid_black_list = [] - options = ElasticsearchQuerystringBackend.options + ( - ("put_filename_in_ref", False, "Want to have yml name in reference ?", None), - ("convert_to_url", False, "Want to convert to a URL ?", None), - ("path_to_replace", "../", "The local path to replace with dest_base_url", None), - ("dest_base_url", "https://github.com/SigmaHQ/sigma/tree/master/", "The URL prefix", None), - ("custom_tag", None , "Add custom tag. 
for multi split with a comma tag1,tag2 ", None), - ) - default_rule_type = "query" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.tactics = self._load_mitre_file("tactics") - self.techniques = self._load_mitre_file("techniques") - self.rule_type = self.default_rule_type - self.rule_threshold = {} - - def _rule_lang_from_type(self): - rule_lang_map = { - "eql": "eql", - "query": "lucene", - "threat-match": "lucene", - "threshold": "lucene", - } - return rule_lang_map[self.rule_type] - - def _load_mitre_file(self, mitre_type): - try: - backend_dir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "config", "mitre")) - path = os.path.join(backend_dir,"{}.json".format(mitre_type)) - with open(path, 'r') as config_file: - config = json.load(config_file) - return config - except (IOError, OSError) as e: - print("Failed to open {} configuration file '%s': %s".format(path, str(e)), file=sys.stderr) - return [] - except json.JSONDecodeError as e: - print("Failed to parse {} configuration file '%s' as valid YAML: %s" % (path, str(e)), file=sys.stderr) - return [] - - def generate(self, sigmaparser): - # reset per-detection variables - self.rule_type = self.default_rule_type - self.rule_threshold = {} - - translation = super().generate(sigmaparser) - if translation: - index = sigmaparser.get_logsource().index - if len(index) == 0: - index = ["apm-*-transaction", "auditbeat-*", "endgame-*", "filebeat-*", "packetbeat-*", "winlogbeat-*"] - configs = sigmaparser.parsedyaml - configs.update({"translation": translation}) - rule = self.create_rule(configs, index) - return rule - - def create_threat_description(self, tactics_list, techniques_list): - threat_list = list() - # sort lists for correct handling with subtechniques - tactics_list.sort(key=lambda x: x['external_id'], reverse=False) - techniques_list.sort(key=lambda x: x['technique_id'], reverse=False) - - for tactic in tactics_list: - temp_tactics = { - "tactic": { - "id": tactic.get("external_id", ""), - "reference": tactic.get("url", ""), - "name": tactic.get("tactic", "") - }, - "framework": "MITRE ATT&CK®" - } - temp_techniques = list() - for tech in techniques_list: - if tactic.get("tactic", "") in tech.get("tactic", []): - temp_techniques.append({ - "id": tech.get("technique_id", ""), - "name": tech.get("technique", ""), - "reference": tech.get("url", "") - }) - elif re.match('[T][0-9]{4}.[0-9]{3}', tech.get("technique_id", ""), re.IGNORECASE): - # add subtechnique to main technique - technique = tech.get("technique_id", "").split(".")[0] - technique_entry = list(filter(lambda temp_techniques: temp_techniques['id'] == technique, temp_techniques)) - - if technique_entry: - index = temp_techniques.index(technique_entry[0]) - temp_subtechniques = temp_techniques[index].get("subtechnique", []) - temp_subtechniques.append( - { - "id": tech.get("technique_id", ""), - "name": tech.get("technique", ""), - "reference": tech.get("url", "") - } - ) - temp_techniques[index].update({"subtechnique": temp_subtechniques}) - - temp_tactics.update({"technique": temp_techniques}) - threat_list.append(temp_tactics) - return threat_list - - def find_tactics(self, key_name=None, key_id=None): - for tactic in self.tactics: - if key_name and key_name == tactic.get("tactic", ""): - return tactic - if key_id and key_id == tactic.get("external_id", ""): - return tactic - - def find_technique(self, key_id=None): - for technique in self.techniques: - if key_id and key_id == 
technique.get("technique_id", ""): - return technique - - def map_risk_score(self, level): - if level not in ["low","medium","high","critical"]: - level = "medium" - if level == "low": - return 5 - elif level == "medium": - return 35 - elif level == "high": - return 65 - elif level == "critical": - return 95 - - def map_severity(self, severity): - severity = severity.lower() - if severity in ["low","medium","high","critical"]: - return severity - elif severity == "informational": - return "low" - else: - return "medium" - - def build_ymlfile_ref(self, configs): - if self.put_filename_in_ref == False: # Dont want - return None - - yml_filename = configs.get("yml_filename") - yml_path = configs.get("yml_path") - if yml_filename == None or yml_path == None: - return None - - if self.convert_to_url: - yml_path = yml_path.replace('\\','/') #windows path to url - self.path_to_replace = self.path_to_replace.replace('\\','/') #windows path to url - if self.path_to_replace not in yml_path: #Error to change - return None - - new_ref = yml_path.replace(self.path_to_replace,self.dest_base_url) + '/' + yml_filename - else: - new_ref = yml_filename - return new_ref - - def create_rule(self, configs, index): - tags = configs.get("tags", []) - tactics_list = list() - technics_list = list() - new_tags = list() - - # sort tags so it looks nice :) - tags.sort() - - for tag in tags: - tag = tag.replace("attack.", "") - # if there's a subtechnique, add main technique to the list if not already there - if re.match("[t][0-9]{4}.[0-9]{3}", tag, re.IGNORECASE): - technique = tag.split('.')[0] - if technique not in tags and technique.title() not in new_tags: - tech = self.find_technique(technique.title()) - if tech: - new_tags.append(technique.title()) - technics_list.append(tech) - - if re.match("[t][0-9]{4}", tag, re.IGNORECASE): - tech = self.find_technique(tag.title()) - if tech: - new_tags.append(tag.title()) - technics_list.append(tech) - else: - if "_" in tag: - tag_list = tag.split("_") - tag_list = [item.title() for item in tag_list] - tact = self.find_tactics(key_name=" ".join(tag_list)) - if tact: - new_tags.append(" ".join(tag_list)) - tactics_list.append(tact) - elif re.match("[ta][0-9]{4}", tag, re.IGNORECASE): - tact = self.find_tactics(key_id=tag.upper()) - if tact: - new_tags.append(tag.upper()) - tactics_list.append(tact) - else: - tact = self.find_tactics(key_name=tag.title()) - if tact: - tactics_list.append(tact) - - # capitalize if not a MITRE CAR tag - if re.match("car.\d{4}-\d{2}-\d{3}", tag, re.IGNORECASE): - new_tags.append(tag) - else: - new_tags.append(tag.title()) - - if self.custom_tag: - if ',' in self.custom_tag: - tag_split = self.custom_tag.split(",") - for l_tag in tag_split: - new_tags.append(l_tag) - else: - new_tags.append(self.custom_tag) - - threat = self.create_threat_description(tactics_list=tactics_list, techniques_list=technics_list) - rule_name = configs.get("title", "").lower() - rule_uuid = configs.get("id", "").lower() - if rule_uuid == "": - rule_uuid = str(uuid4()) - if rule_uuid in self.uuid_black_list: - rule_uuid = str(uuid4()) - self.uuid_black_list.append(rule_uuid) - rule_id = re.sub(re.compile('[()*+!,\[\].\s"]'), "_", rule_uuid) - risk_score = self.map_risk_score(configs.get("level", "medium")) - references = configs.get("reference") - if references is None: - references = configs.get("references") - falsepositives = [] - yml_falsepositives = configs.get('falsepositives',["Unknown"]) - if isinstance(yml_falsepositives,str): - 
falsepositives.append(yml_falsepositives) - else: - falsepositives=yml_falsepositives - - add_ref_yml= self.build_ymlfile_ref(configs) - if add_ref_yml: - if references is None: # No ref - references=[] - if add_ref_yml in references: - pass # else put a duplicate ref for multi rule file - else: - references.append(add_ref_yml) - - # add author filed depending on data type in rule file - author = configs.get("author", "") - if isinstance(author, str): - author_list = author.split(', ') - elif isinstance(author, list): - author_list = author - else: - author_list = [] - - rule = { - "author": author_list, - "description": configs.get("description", ""), - "enabled": True, - "false_positives": falsepositives, - "filters": [], - "from": "now-360s", - "immutable": False, - "index": index, - "interval": "5m", - "rule_id": rule_id, - "language": self._rule_lang_from_type(), - "output_index": ".siem-signals-default", - "max_signals": 100, - "risk_score": risk_score, - "name": configs.get("title", ""), - "query":configs.get("translation"), - "meta": { - "from": "1m" - }, - "severity": self.map_severity(configs.get("level", "medium")), - "tags": new_tags, - "to": "now", - "type": self.rule_type, - "threat": threat, - "version": 1 - } - if self.rule_type == "threshold": - rule.update({"threshold": self.rule_threshold}) - if references: - rule.update({"references": references}) - return json.dumps(rule) - - -class ElasticSearchRuleEqlBackend(ElasticSearchRuleBackend, ElasticsearchEQLBackend): - """Converts Sigma rule into Elastic SIEM EQL query""" - default_rule_type = "eql" - identifier = "es-rule-eql" - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - -class ElasticSearchRuleQsBackend(ElasticSearchRuleBackend, ElasticsearchQuerystringBackend): - """Converts Sigma rule into Elastic SIEM lucene query""" - identifier = "es-rule" - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def generateAggregation(self, agg): - if agg.aggfunc == SigmaAggregationParser.AGGFUNC_COUNT: - if agg.cond_op not in [">", ">="]: - raise NotImplementedError("Threshold rules can only handle > and >= operators") - if agg.aggfield: - raise NotImplementedError("Threshold rules cannot COUNT(DISTINCT %s)" % agg.aggfield) - self.rule_type = "threshold" - self.rule_threshold = { - "field": [agg.groupfield] if agg.groupfield else [], - "value": int(agg.condition) if agg.cond_op == ">=" else int(agg.condition) + 1 - } - return "" - raise NotImplementedError("Aggregation %s is not implemented for this backend" % agg.aggfunc_notrans) - -class KibanaNdjsonBackend(ElasticsearchQuerystringBackend, MultiRuleOutputMixin): - """Converts Sigma rule into Kibana JSON Configuration files (searches only).""" - identifier = "kibana-ndjson" - active = True - options = ElasticsearchQuerystringBackend.options + ( - ("output", "import", "Output format: import = JSON file manually imported in Kibana, curl = Shell script that imports queries in Kibana via curl (jq is additionally required)", "output_type"), - ("es", "localhost:9200", "Host and port of Elasticsearch instance", None), - ("index", ".kibana", "Kibana index", None), - ("prefix", "Sigma: ", "Title prefix of Sigma queries", None), - ) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.kibanaconf = list() - self.indexsearch = set() - - def generate(self, sigmaparser): - description = sigmaparser.parsedyaml.setdefault("description", "") - - columns = list() - try: - for field in 
sigmaparser.parsedyaml["fields"]: - mapped = sigmaparser.config.get_fieldmapping(field).resolve_fieldname(field, sigmaparser) - if type(mapped) == str: - columns.append(mapped) - elif type(mapped) == list: - columns.extend(mapped) - else: - raise TypeError("Field mapping must return string or list") - except KeyError: # no 'fields' attribute - pass - - indices = sigmaparser.get_logsource().index - if len(indices) == 0: # fallback if no index is given - indices = ["*"] - - for parsed in sigmaparser.condparsed: - result = self.generateNode(parsed.parsedSearch) - - for index in indices: - rulename = self.getRuleName(sigmaparser) - if len(indices) > 1: # add index names if rule must be replicated because of ambigiuous index patterns - raise NotSupportedError("Multiple target indices are not supported by Kibana") - else: - title = self.prefix + sigmaparser.parsedyaml["title"] - - self.indexsearch.add( - "export {indexvar}=$(curl -s '{es}/{index}/_search?q=index-pattern.title:{indexpattern}' | jq -r '.hits.hits[0]._id | ltrimstr(\"index-pattern:\")')".format( - es=self.es, - index=self.index, - indexpattern=index.replace("*", "\\*"), - indexvar=self.index_variable_name(index) - ) - ) - self.kibanaconf.append({ - "id": rulename, - "type": "search", - "attributes": { - "title": title, - "description": description, - "hits": 0, - "columns": columns, - "sort": ["@timestamp", "desc"], - "version": 1, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "index": index, - "filter": [], - "highlight": { - "pre_tags": ["@kibana-highlighted-field@"], - "post_tags": ["@/kibana-highlighted-field@"], - "fields": { "*":{} }, - "require_field_match": False, - "fragment_size": 2147483647 - }, - "query": { - "query_string": { - "query": result, - "analyze_wildcard": True - } - } - } - } - }, - "references": [ - { - "id": index, - "name": "kibanaSavedObjectMeta.searchSourceJSON.index", - "type": "index-pattern" - } - ] - }) - - def finalize(self): - if self.output_type == "import": # output format that can be imported via Kibana UI - for item in self.kibanaconf: # JSONize kibanaSavedObjectMeta.searchSourceJSON - item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON'] = json.dumps(item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON']) - if self.kibanaconf: - ndjson = "" - for item in self.kibanaconf: - ndjson += json.dumps(item) - ndjson += "\n" - return ndjson - elif self.output_type == "curl": - for item in self.indexsearch: - return item - for item in self.kibanaconf: - item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON']['index'] = "$" + self.index_variable_name(item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON']['index']) # replace index pattern with reference to variable that will contain Kibana index UUID at script runtime - item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON'] = json.dumps(item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON']) # Convert it to JSON string as expected by Kibana - item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON'] = item['attributes']['kibanaSavedObjectMeta']['searchSourceJSON'].replace("\\", "\\\\") # Add further escaping for escaped quotes for shell - return "curl -s -XPUT -H 'Content-Type: application/json' --data-binary @- '{es}/{index}/doc/{doc_id}' <") -def sendFIR(esid): - return createFIREvent(esid) - - -@app.route("/grr/flow/+") -def sendGRR(esid, flow_name): - return createGRRFlow(esid, flow_name) - - -@app.route("/thehive/alert/") -def createAlertHive(esid): - return createHiveAlert(esid) - 
-@app.route("/thehive/case/") -def createCaseHive(esid): - return createHiveCase(esid) - -@app.route("/thehive/alert/send", methods=['POST']) -def sendAlertHive(): - if request.method == 'POST': - if request.form['submit_button'] == 'Submit': - result = request.form.to_dict() - title = result['title'] - tlp = result['tlp'] - description = result['description'].strip('\"') - tags = result['tags'] - artifact_string = result['artifact_string'] - sourceRef = result['sourceRef'] - return sendHiveAlert(title, tlp, tags, description, sourceRef, artifact_string) - else: - return render_template("cancel.html") - -@app.route("/thehive/case/send", methods=['POST']) -def sendCaseHive(): - if request.method == 'POST': - if request.form['submit_button'] == 'Submit': - result = request.form.to_dict() - title = result['title'] - #tlp = result['tlp'] - description = result['description'].strip('\"') - #tags = result['tags'] - #artifact_string = result['artifact_string'] - #sourceRef = result['sourceRef'] - severity = result['severity'] - return sendHiveCase(title, description, severity) - else: - return render_template("cancel.html") - -@app.route("/misp/event/") -def sendMISP(esid): - return createMISPEvent(esid) - - -@app.route("/rtir/incident/") -def sendRTIR(esid): - return createRTIRIncident(esid) - - -@app.route("/slack/") -def sendSlack(esid): - return createSlackAlert(esid) - -@app.route("/playbook/webhook", methods=['POST']) -def sendPlaybook(): - webhook_content = request.get_json() - return playbookWebhook(webhook_content) - - -@app.route("/playbook/sigmac", methods=['POST']) -def sendSigma(): - raw = request.get_data(as_text=True) - return playbookSigmac(raw) - - -@app.route("/playbook/play", methods=['POST']) -def sendSigmaYaml(): - sigma_raw = request.get_data(as_text=True) - sigma_dict = yaml.load(sigma_raw) - return playbookCreatePlay(sigma_raw, sigma_dict) - - -@app.route("/es/showresult/") -def sendESQuery(esid): - return showESResult(esid) - - -@app.route("/es/event/modify/") -def sendModifyESEvent(esid): - return eventModifyFields(esid) - - -@app.route("/es/event/update", methods=['GET', 'POST']) -def sendESEventUpdate(): - if request.method == 'POST': - result = request.form - esid = result['esid'] - esindex = result['esindex'] - tags = result['tags'] - return eventUpdateFields(esindex, esid, tags) - - -@app.route("/enrich", methods=['POST']) -def sendEnrich(): - if request.method == 'POST': - webhook_content = request.get_json() - return processHiveReq(webhook_content) - - -if __name__ == "__main__": - app.run(host='0.0.0.0', port=7000) diff --git a/so-soctopus/so-soctopus/config.py b/so-soctopus/so-soctopus/config.py deleted file mode 100644 index b86e968..0000000 --- a/so-soctopus/so-soctopus/config.py +++ /dev/null @@ -1,8 +0,0 @@ -# Base config -import configparser - -parser = configparser.ConfigParser() -parser.read('SOCtopus.conf') - -es_index = parser.get('es', 'es_index_pattern', fallback='so-*') - diff --git a/so-soctopus/so-soctopus/destinations.py b/so-soctopus/so-soctopus/destinations.py deleted file mode 100644 index 7e4b84f..0000000 --- a/so-soctopus/so-soctopus/destinations.py +++ /dev/null @@ -1,772 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -from helpers import get_hits, get_conn, do_update -from thehive4py.api import TheHiveApi -from thehive4py.models import Alert, AlertArtifact -from pymisp import PyMISP -from requests.auth import HTTPBasicAuth -from flask import redirect, render_template, jsonify -from forms import DefaultForm -from config import 
parser, es_index -import playbook -import json -import uuid -import sys -import rt -import requests -import os -import base64 -import time -import jsonpickle -import urllib3 - -urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - -es_url = parser.get('es', 'es_url') -hive_url = parser.get('hive', 'hive_url') -hive_key = parser.get('hive', 'hive_key') -hive_verifycert = parser.getboolean('hive', 'hive_verifycert', fallback=False) - -def hiveInit(): - return TheHiveApi(hive_url, hive_key, cert=hive_verifycert) - - -def createHiveCase(esid): - search = get_hits(esid) - tlp = int(parser.get('hive', 'hive_tlp')) - severity = 2 - for item in search['hits']['hits']: - result = item['_source'] - es_id = item['_id'] - try: - message = result['message'] - description = str(message) - except: - description = str(result) - sourceRef = str(uuid.uuid4())[0:6] - tags = ["SecurityOnion"] - artifacts = [] - event = result['event'] - src = srcport = dst = dstport = None - if event['dataset'] == 'alert': - title = result['rule']['name'] - else: - title = f'New {event["module"].capitalize()} {event["dataset"].capitalize()} Event' - form = DefaultForm() - #artifact_string = jsonpickle.encode(artifacts) - return render_template('hive.html', title=title, description=description, severity=severity, form=form) - -def createHiveAlert(esid): - search = get_hits(esid) - # Hive Stuff - hive_url = parser.get('hive', 'hive_url') - hive_api = hiveInit() - tlp = int(parser.get('hive', 'hive_tlp')) - for item in search['hits']['hits']: - # Get initial details - result = item['_source'] - message = result['message'] - es_id = item['_id'] - description = str(message) - sourceRef = str(uuid.uuid4())[0:6] - tags = ["SecurityOnion"] - artifacts = [] - event = result['event'] - src = srcport = dst = dstport = None - - if 'source' in result: - if 'ip' in result['source']: - src = str(result['source']['ip']) - if 'port' in result['source']: - srcport = str(result['source']['port']) - if 'destination' in result: - if 'ip' in result['destination']: - dst = str(result['destination']['ip']) - if 'port' in result['destination']: - dstport = str(result['destination']['port']) - - # NIDS Alerts - if event['module'] == 'ids': - alert = result['rule']['name'] - sid = str(result['rule']['signature_id']) - category = result['rule']['category'] - sensor = result['observer']['name'] - masterip = str(es_url.split("//")[1].split(":")[0]) - tags.append("nids") - tags.append(category) - title = alert - print(alert) - sys.stdout.flush() - # Add artifacts - artifacts.append(AlertArtifact(dataType='ip', data=src)) - artifacts.append(AlertArtifact(dataType='ip', data=dst)) - artifacts.append(AlertArtifact(dataType='other', data=sensor)) - description = "`NIDS Dashboard:` \n\n \n\n `IPs: `" + src + ":" + srcport + "-->" + dst + ":" + dstport + "\n\n `Signature:`" + alert + "\n\n `PCAP:` " + "https://" + masterip + "/kibana/so-soctopus//sensoroni/securityonion/joblookup?redirectUrl=/sensoroni/&esid=" + es_id - - # Zeek logs - elif event['module'] == 'zeek': - _map_key_type = { - "conn": "Connection", - "dhcp": "DHCP", - "dnp3": "DNP3", - "dns": "DNS", - "file": "Files", - "ftp": "FTP", - "http": "HTTP", - "intel": "Intel", - "irc": "IRC", - "kerberos": "Kerberos", - "modbus": "Modbus", - "mysql": "MySQL", - "ntlm": "NTLM", - "pe": "PE", - "radius": "RADIUS", - "rdp": "RDP", - "rfb": "RFB", - "sip": "SIP", - "smb": "SMB", - "smtp": "SMTP", - "snmp": "SNMP", - "ssh": "SSH", - "ssl": "SSL", - "syslog": "Syslog", - "weird": "Weird", - 
"x509": "X509" - } - - zeek_tag = event['dataset'] - zeek_tag_title = _map_key_type.get(zeek_tag) - title = str('New Zeek ' + zeek_tag_title + ' record!') - - if src: - artifacts.append(AlertArtifact(dataType='ip', data=src)) - if dst: - artifacts.append(AlertArtifact(dataType='ip', data=dst)) - if result.get('observer', {}).get('name'): - sensor = str(result['observer']['name']) - artifacts.append(AlertArtifact(dataType='other', data=sensor)) - if result.get('log', {}).get('id', {}).get('uid'): - uid = str(result['log']['id']['uid']) - title = str('New Zeek ' + zeek_tag_title + ' record! - ' + uid) - artifacts.append(AlertArtifact(dataType='other', data=uid)) - if result.get('log', {}).get('id', {}).get('fuid'): - fuid = str(result['log']['id']['fuid']) - title = str('New Zeek ' + zeek_tag_title + ' record! - ' + fuid) - artifacts.append(AlertArtifact(dataType='other', data=fuid)) - if result.get('log', {}).get('id', {}).get('id'): - fuid = str(result['log']['id']['id']) - title = str('New Zeek ' + zeek_tag_title + ' record! - ' + fuid) - artifacts.append(AlertArtifact(dataType='other', data=fuid)) - - tags.append('zeek') - tags.append(zeek_tag) - - # Wazuh/OSSEC logs - elif event['module'] == 'ossec': - agent_name = result['agent']['name'] - if 'description' in result: - ossec_desc = result['rule']['description'] - else: - ossec_desc = result['log']['full'] - if 'ip' in result['agent']: - agent_ip = result['agent']['ip'] - artifacts.append(AlertArtifact(dataType='ip', data=agent_ip)) - artifacts.append(AlertArtifact(dataType='other', data=agent_name)) - else: - artifacts.append(AlertArtifact(dataType='other', data=agent_name)) - - title = ossec_desc - tags.append("wazuh") - - # Sysmon logs - elif event['module'] == 'sysmon': - if 'ossec' in result['tags']: - agent_name = result['agent']['name'] - agent_ip = result['agent']['ip'] - artifacts.append(AlertArtifact(dataType='ip', data=agent_ip)) - artifacts.append(AlertArtifact(dataType='other', data=agent_name)) - tags.append("wazuh") - elif 'beat' in result['tags']: - agent_name = str(result['agent']['hostname']) - if result.get('agent'): - try: - os_name = str(result['agent']['os']['name']) - artifacts.append(AlertArtifact(dataType='other', data=os_name)) - except: - pass - try: - beat_name = str(result['agent']['name']) - artifacts.append(AlertArtifact(dataType='other', data=beat_name)) - except: - pass - if result.get('source', {}).get('hostname'): - source_hostname = result['source']['hostname'] - artifacts.append(AlertArtifact(dataType='fqdn', data=source_hostname)) - if result.get('source', {}).get('ip'): - source_ip = str(result['source']['ip']) - artifacts.append(AlertArtifact(dataType='ip', data=source_ip)) - if result.get('destination', {}).get('ip'): - destination_ip = str(result['destination']['ip']) - artifacts.append(AlertArtifact(dataType='ip', data=destination_ip)) - # FIXME: find what "image_path" has been changed to - # if 'image_path' in result: - # image_path = str(result['image_path']) - # artifacts.append(AlertArtifact(dataType='filename', data=image_path)) - # FIXME: find what "Hashes" has been changed to - # if 'Hashes' in result['data']['data']: - # hashes = result['event']['data']['Hashes'] - # for hash in hashes.split(','): - # if hash.startswith('MD5') or hash.startswith('SHA256'): - # artifacts.append(AlertArtifact(dataType='hash', data=hash.split('=')[1])) - tags.append("agent") - else: - agent_name = '' - title = "New Sysmon Event! 
- " + agent_name - - else: - title = f'New {event["module"]}_{event["dataset"]} Event From Security Onion' - form = DefaultForm() - artifact_string = jsonpickle.encode(artifacts) - return render_template('hive.html', title=title, tlp=tlp, tags=tags, description=description, - artifact_string=artifact_string, sourceRef=sourceRef, form=form) - - -def sendHiveAlert(title, tlp, tags, description, sourceRef, artifact_string): - tlp = int(parser.get('hive', 'hive_tlp')) - - hive_api = hiveInit() - - newtags = tags.strip('][').replace("'", "").split(', ') - description = description.strip('"') - artifacts = json.loads(artifact_string) - - # Build alert - hivealert = Alert( - title=title, - tlp=tlp, - tags=newtags, - description=description, - type='external', - source='SecurityOnion', - sourceRef=sourceRef, - artifacts=artifacts - ) - - # Send it off - response = hive_api.create_alert(hivealert) - if response.status_code == 201: - print(json.dumps(response.json(), indent=4, sort_keys=True)) - print('') - - else: - print('ko: {}/{}'.format(response.status_code, response.text)) - sys.exit(0) - - # Redirect to TheHive instance - return redirect(hive_url + '/index.html#!/alert/list') - - - -def sendHiveCase(title, description, severity): - soc_url = parser.get('soc', 'soc_url') - description = str(description.strip('"')) - - headers = { - 'Content-Type': 'application/json', - } - - data = {"title": title, "description": description, "severity": int(severity)} - - response = requests.post(soc_url + '/api/case', headers=headers, json=data, verify=False) - if response.status_code == 200: - print(json.dumps(response.json(), indent=4, sort_keys=True)) - print('') - - else: - print('ko: {}/{}'.format(response.status_code, response.text)) - sys.exit(0) - - # Redirect to TheHive instance - return redirect(hive_url + '/index.html') - - -def createMISPEvent(esid): - search = get_hits(esid) - # MISP Stuff - misp_url = parser.get('misp', 'misp_url') - misp_key = parser.get('misp', 'misp_key') - misp_verifycert = parser.getboolean('misp', 'misp_verifycert', fallback=False) - distrib = parser.get('misp', 'distrib') - threat = parser.get('misp', 'threat') - analysis = parser.get('misp', 'analysis') - - for result in search['hits']['hits']: - result = result['_source'] - message = result['message'] - description = str(message) - info = description - - def init(url, key): - return PyMISP(url, key, ssl=misp_verifycert, debug=True) - - misp = init(misp_url, misp_key) - - event = misp.new_event(distrib, threat, analysis, info) - event_id = str(event['Event']['id']) - - if result.get('source', {}).get('ip'): - data_type = "ip-src" - source_ip = result['source']['ip'] - misp.add_named_attribute(event_id, data_type, source_ip) - - if result.get('destination', {}).get('ip'): - data_type = "ip-dst" - destination_ip = result['destination']['ip'] - misp.add_named_attribute(event_id, data_type, destination_ip) - - # Redirect to MISP instance - return redirect(misp_url + '/events/index') - - -def createGRRFlow(esid, flow_name): - search = get_hits(esid) - - tlp = int(parser.get('hive', 'hive_tlp')) - hive_api = hiveInit() - - grr_url = parser.get('grr', 'grr_url') - grr_user = parser.get('grr', 'grr_user') - grr_pass = parser.get('grr', 'grr_pass') - grrapi = api.InitHttp(api_endpoint=grr_url, - auth=(grr_user, grr_pass)) - - base64string = '%s:%s' % (grr_user, grr_pass) - base64string = base64.b64encode(bytes(base64string, "utf-8")) - auth_header = "Basic %s" % base64string - index_response = requests.get(grr_url, 
auth=HTTPBasicAuth(grr_user, grr_pass)) - csrf_token = index_response.cookies.get("csrftoken") - headers = { - "Authorization": auth_header, - "x-csrftoken": csrf_token, - "x-requested-with": "XMLHttpRequest" - } - cookies = { - "csrftoken": csrf_token - } - - for result in search['hits']['hits']: - result = result['_source'] - message = result['message'] - - if result.get('source', {}).get('ip'): - source_ip = result['source']['ip'] - - if result.get('destination', {}).get('ip'): - destination_ip = result['destination']['ip'] - - for ip in source_ip, destination_ip: - search_result = grrapi.SearchClients(ip) - grr_result = {} - client_id = '' - for client in search_result: - # Get client id - client_id = client.client_id - client_last_seen_at = client.data.last_seen_at - grr_result[client_id] = client_last_seen_at - if client_id is None: - pass - - # Process flow and get flow id - flow_id = listProcessFlow(client_id, grr_url, headers, cookies, grr_user, grr_pass) - - # Get status - status = checkFlowStatus(client_id, grr_url, flow_id, headers, cookies, grr_user, grr_pass) - - # Keep checking to see if complete - while status != "terminated": - time.sleep(15) - print("Flow not yet completed..waiting 15 secs before attempting to check status again...") - status = checkFlowStatus(client_id, grr_url, flow_id, headers, cookies, grr_user, grr_pass) - - # If terminated, run the download - if status == "terminated": - downloadFlowResults(client_id, grr_url, flow_id, headers, cookies, grr_user, grr_pass) - - # Run flow via API client - # flow_obj = grrapi.Client(client_id) - # flow_obj.CreateFlow(name=flow_name) - title = "Test Alert with GRR Flow" - description = str(message) - sourceRef = str(uuid.uuid4())[0:6] - tags = ["SecurityOnion", "GRR"] - artifacts = [] - filepath = "/tmp/soctopus/" + client_id + ".zip" - artifacts.append(AlertArtifact(dataType='file', data=str(filepath))) - - # Build alert - hive_alert = Alert( - title=title, - tlp=tlp, - tags=tags, - description=description, - type='external', - source='SecurityOnion', - sourceRef=sourceRef, - artifacts=artifacts - ) - - # Send it off - response = hive_api.create_alert(hive_alert) - - if client_id: - # Redirect to GRR instance - return redirect(grr_url + '/#/clients/' + client_id + '/flows') - else: - return "No matches found for source or destination ip" - - -def createRTIRIncident(esid): - search = get_hits(esid) - rtir_url = parser.get('rtir', 'rtir_url') - rtir_api = parser.get('rtir', 'rtir_api') - rtir_user = parser.get('rtir', 'rtir_user') - rtir_pass = parser.get('rtir', 'rtir_pass') - rtir_queue = parser.get('rtir', 'rtir_queue') - rtir_creator = parser.get('rtir', 'rtir_creator') - verify_cert = parser.getboolean('rtir', 'rtir_verifycert', fallback=False) - - for result in search['hits']['hits']: - result = result['_source'] - message = result['message'] - description = str(message) - event = result['event'] - rtir_subject = f'New {event["module"]}_{event["dataset"]} Event From Security Onion' - rtir_text = description - rtir_rt = rt.Rt(rtir_url + '/' + rtir_api, rtir_user, rtir_pass, verify_cert=verify_cert) - rtir_rt.login() - rtir_rt.create_ticket(Queue=rtir_queue, Owner=rtir_creator, Subject=rtir_subject, Text=rtir_text) - rtir_rt.logout() - - # Redirect to RTIR instance - return redirect(rtir_url) - - -def createSlackAlert(esid): - search = get_hits(esid) - slack_url = parser.get('slack', 'slack_url') - webhook_url = parser.get('slack', 'slack_webhook') - for result in search['hits']['hits']: - result = result['_source'] 
- message = result['message'] - description = str(message) - slack_data = {'text': description} - - response = requests.post( - webhook_url, data=json.dumps(slack_data), - headers={'Content-Type': 'application/json'} - ) - if response.status_code != 200: - raise ValueError( - 'Request to slack returned an error %s, the response is:\n%s' - % (response.status_code, response.text) - ) - - # Redirect to Slack workspace - return redirect(slack_url) - - -def createFIREvent(esid): - search = get_hits(esid) - fir_api = '/api/incidents' - fir_url = parser.get('fir', 'fir_url') - fir_token = parser.get('fir', 'fir_token') - actor = parser.get('fir', 'fir_actor') - category = parser.get('fir', 'fir_category') - confidentiality = parser.get('fir', 'fir_confidentiality') - detection = parser.get('fir', 'fir_detection') - plan = parser.get('fir', 'fir_plan') - severity = parser.get('fir', 'fir_severity') - verify_cert = parser.getboolean('fir', 'fir_verifycert', fallback=False) - - for result in search['hits']['hits']: - result = result['_source'] - message = result['message'] - event = result['event'] - description = str(message) - - subject = f'New {event["module"]}_{event["dataset"]} Event From Security Onion' - - headers = { - 'Authorization': 'Token ' + fir_token, - 'Content-type': 'application/json' - } - - data = { - "actor": actor, - "category": category, - "confidentiality": confidentiality, - "description": description, - "detection": detection, - "plan": plan, - "severity": int(severity), - "subject": subject - } - - requests.post(fir_url + fir_api, headers=headers, data=json.dumps(data), verify=verify_cert) - - # Redirect to FIR instance - return redirect(fir_url + '/events') - - -def playbookWebhook(webhook_content): - """ - Process incoming playbook webhook. 
- - """ - action = webhook_content['payload']['action'] - issue_tracker_name = webhook_content['payload']['issue']['tracker']['name'] - issue_id = webhook_content['payload']['issue']['id'] - issue_status_name = webhook_content['payload']['issue']['status']['name'] - - if action == 'updated' and issue_tracker_name == 'Play': - journal_details = webhook_content['payload']['journal']['details'] - detection_updated = False - for item in journal_details: - # Check to see if the Sigma field has changed - if item['prop_key'] == '9': - # Sigma field updated (Sigma field ID is 9) --> Call function - Update Play metadata - playbook.play_update(issue_id) - # Run Play Unit Test (If Target Log exists) - #playbook.play_unit_test(issue_id,"Sigma Updated") - # Create/Update ElastAlert config - if issue_status_name == "Active" and not detection_updated: - detection_updated = True - playbook.elastalert_update(issue_id) - elif issue_status_name == "Inactive" and not detection_updated: - detection_updated = True - playbook.elastalert_disable(issue_id) - - # Check to see if the Play status has changed to Active or Inactive - elif item['prop_key'] == 'status_id' and not detection_updated: - if item['value'] == '3': - # Status = Active --> Enable EA & TheHive - detection_updated = True - playbook.elastalert_update(issue_id) - elif item['value'] == '4': - # Status = Inactive --> Disable EA - detection_updated = True - playbook.elastalert_disable(issue_id) - # Check to see if the Play Custom Filter (Field ID 21) has been updated - if so, update elastalert rule - elif item['prop_key'] == '21': - playbook.play_update(issue_id) - if issue_status_name == "Active": playbook.elastalert_update(issue_id) - if item['prop_key'] == '30': - playbook.play_template_backup(issue_id) - if item['prop_key'] == '27': - playbook.elastalert_update(issue_id) - - #New section added for Sigma Option Changes - if action == 'updated' and issue_tracker_name == 'Sigma Options': - journal_details = webhook_content['payload']['journal']['details'] - for item in journal_details: - if item['prop_key'] == '37' and item['value'] == '1': - playbook.play_backup(issue_id) - if item['prop_key'] == '38' and item['value'] == '1': - playbook.play_import(issue_id) - if item['prop_key'] == '39' and item['value'] == '1': - playbook.play_clear_update_available(issue_id) - - #New Section added for email option changes - if action == 'updated' and issue_tracker_name == 'Email Options': - playbook.smtp_update(issue_id) - return "success" - - -def playbookSigmac(sigma): - """ - Process incoming Sigma. - - """ - esquery = playbook.sigmac_generate(sigma) - - return esquery - - -def playbookCreatePlay(sigma_raw, sigma_dict): - """ - Process incoming Sigma Yaml. 
- - """ - play_data = playbook.play_create(sigma_raw, sigma_dict) - - return jsonify(play_data) - -def showESResult(esid): - search = get_hits(esid) - for result in search['hits']['hits']: - esindex = result['_index'] - result = result['_source'] - - return render_template("result.html", result=result, esindex=esindex) - - -def eventModifyFields(esid): - search = get_hits(esid) - for result in search['hits']['hits']: - esindex = result['_index'] - result = result['_source'] - tags = result['tags'] - form = DefaultForm() - return render_template('update_event.html', result=result, esindex=esindex, esid=esid, tags=tags, form=form) - - -def eventUpdateFields(esindex, esid, tags): - do_update(esindex, esid, tags) - return showESResult(esid) - - -def processHiveReq(webhook_content): - api = hiveInit() - event_details = getHiveStatus(webhook_content) - # event_id = event_details.split(' ')[0] - event_status = event_details.split(' ')[1] - auto_analyze_alerts = parser.get('cortex', 'auto_analyze_alerts') - - # Run analyzers before case import - if event_status == "alert_creation": - if auto_analyze_alerts == "yes": - sys.stdout.flush() - alert_id = webhook_content['objectId'] - observables = webhook_content['object']['artifacts'] - analyzeAlertObservables(alert_id, observables) - - # Check to see if we are creating a new task - if event_status == "case_task_creation": - headers = { - 'Authorization': 'Bearer ' + hive_key - } - task_id = webhook_content['objectId'] - task_status = "InProgress" - task_case = webhook_content['object']['_parent'] - task_title = webhook_content['object']['title'] - - # Check the task to see if it matches our conventionm for auto-analyze tasks (via Playbook, etc) - if "Analyzer" in task_title: - analyzer_minimal = task_title.split(" - ")[1] - enabled_analyzers = getCortexAnalyzers() - supported_analyzers = parser.get('cortex', 'supported_analyzers').split(",") - if analyzer_minimal in supported_analyzers: - # Start task - requests.patch(hive_url + '/api/case/task/' + task_id, headers=headers, - data={'status': task_status}, verify=hive_verifycert) - # Get observables related to case - observables = api.get_case_observables(task_case).json() - for analyzer in enabled_analyzers: - if analyzer_minimal in analyzer['name']: - for cortexId in analyzer['cortexIds']: - # Look through all of our observables - for observable in observables: - # Check to see if observable type supported by analyzer - if observable['dataType'] in analyzer['dataTypeList']: - # Run analyzer - api.run_analyzer(cortexId, observable['id'], analyzer['id']) - # analyzeCaseObservables(observables) - # Add task log - headers = { - 'Authorization': 'Bearer ' + hive_key, - 'Content-Type': 'application/json' - } - task_log = "Automation - Ran " + analyzer_minimal + " analyzer." 
- data = {'message': task_log} - requests.post(hive_url + '/api/case/task/' + task_id + '/log', headers=headers, - data=json.dumps(data), verify=hive_verifycert) - - # Close task - task_status = "Completed" - requests.patch(hive_url + '/api/case/task/' + task_id, headers=headers, - data={'status': task_status}, verify=hive_verifycert) - - sys.stdout.flush() - - return "success" - - -def analyzeAlertObservables(alert_id, observables): - """ - Analyze TheHive observables - """ - alert_id = alert_id - cortex_url = parser.get('cortex', 'cortex_url') - cortex_key = parser.get('cortex', 'cortex_key') - - api = hiveInit() - analyzers = getCortexAnalyzers() - for analyzer in analyzers: - # Get our list of Cortex servers (IDs) - for cortexId in analyzer['cortexIds']: - # Look through all of our observables - for observable in observables: - # Check to see if observable type supported by analyzer - if observable['dataType'] in analyzer['dataTypeList']: - headers = { - 'Authorization': 'Bearer ' + cortex_key, - 'Content-Type': 'application/json' - } - - data = { - "data": observable['data'], - "dataType": observable['dataType'] - } - # Run analyzer - startjob = requests.post(cortex_url + '/api/analyzer/' + analyzer['id'] + '/run', headers=headers, - data=json.dumps(data), verify=hive_verifycert) - wait_interval = '10second' - job_id = startjob.json()['id'] - headers = { - 'Authorization': 'Bearer ' + cortex_key - } - - getresults = requests.get(cortex_url + '/api/job/' + job_id + '/waitreport?atMost=' + wait_interval, - headers=headers, verify=hive_verifycert) - - analyzer_results = getresults.json() - job_status = analyzer_results['status'] - if job_status == "Success": - level = analyzer_results['report']['summary']['taxonomies'][0]['level'] - customFields = {"customFields": {}} - reputation = dict(order=1, string=level) - customFields['customFields']['reputation'] = reputation - headers = { - 'Authorization': 'Bearer ' + hive_key, - 'Content-Type': 'application/json' - } - data = json.dumps(customFields) - requests.patch(hive_url + '/api/alert/' + alert_id, headers=headers, - data=data, verify=hive_verifycert) - else: - pass - return "OK" - - -def getHiveStatus(webhook_content): - """ - Process incoming TheHive webhook - """ - - operation = webhook_content['operation'] - object_type = webhook_content['objectType'] - object = webhook_content['object'] - content_id = object['id'] - status = str(object_type).lower() + "_" + str(operation).lower() - sys.stdout.flush() - return '{} {}'.format(content_id, status) - - -def getCortexAnalyzers(): - headers = { - 'Authorization': 'Bearer ' + hive_key - } - - response = requests.get(hive_url + '/api/connector/cortex/analyzer', headers=headers, verify=hive_verifycert) - analyzers = json.loads(response.text) - return analyzers diff --git a/so-soctopus/so-soctopus/forms.py b/so-soctopus/so-soctopus/forms.py deleted file mode 100644 index 1518804..0000000 --- a/so-soctopus/so-soctopus/forms.py +++ /dev/null @@ -1,8 +0,0 @@ -from flask import Flask, render_template -from flask_wtf import FlaskForm -from wtforms import StringField - - -class DefaultForm(FlaskForm): - esindex = StringField('esindex') - esid = StringField('esid') diff --git a/so-soctopus/so-soctopus/grr.py b/so-soctopus/so-soctopus/grr.py deleted file mode 100644 index 9d2730d..0000000 --- a/so-soctopus/so-soctopus/grr.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -import requests -from requests.auth import HTTPBasicAuth - -def listProcessFlow(client_id,grr_url,headers,cookies,grr_user,grr_pass): - 
data = { - "flow": { - "args": { - "@type": "type.googleapis.com/ListProcessesArgs" - }, - "name": "ListProcesses" - } - } - - response = requests.post(grr_url + "/api/v2/clients/" + client_id + "/flows", - headers=headers, data=json.dumps(data), - cookies=cookies, auth=HTTPBasicAuth(grr_user, grr_pass)) - - decoded_response = response.content.decode("utf-8") - result = decoded_response.lstrip(")]}'") - flow_result = json.loads(result) - flow_id = flow_result["flowId"] - - return flow_id - - -def checkFlowStatus(client_id,grr_url,flow_id,headers,cookies,grr_user,grr_pass): - response = requests.get(grr_url + "/api/clients/" + client_id + "/flows/" + flow_id, - headers=headers, - cookies=cookies, auth=HTTPBasicAuth(grr_user, grr_pass)) - - decoded_response = response.content.decode("utf-8") - result = decoded_response.lstrip(")]}'") - status_check = json.loads(result) - status = str(status_check["value"]["state"]["value"].lower()) - - return status - -def downloadFlowResults(client_id,grr_url,flow_id,headers,cookies,grr_user,grr_pass): - response = requests.get(grr_url + "/api/clients/" + client_id + "/flows/" + flow_id + "/exported-results/csv-zip", - headers=headers, - cookies=cookies, auth=HTTPBasicAuth(grr_user, grr_pass)) - filepath = "/tmp/soctopus/" + client_id + ".zip" - with open(filepath, "wb") as compressed_flow_results: - compressed_flow_results.write(response.content) diff --git a/so-soctopus/so-soctopus/helpers.py b/so-soctopus/so-soctopus/helpers.py deleted file mode 100644 index 6346786..0000000 --- a/so-soctopus/so-soctopus/helpers.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import requests -from requests.utils import quote -from config import parser, es_index - -esserver = parser.get('es', 'es_url') -es_user = parser.get('es', 'es_user', fallback="") -es_pass = parser.get('es', "es_pass", fallback="") -es_verifycert = parser.getboolean('es', 'es_verifycert', fallback=False) - -search_index = f'*:{es_index}' - - -def get_hits(esid: str) -> dict: - query = {"query": {"bool": {"must": {"match": {'_id': esid}}}}} - res_json = __es_search__(query) - if res_json['hits']['total']['value'] > 0: - return res_json - - -def get_conn(conn_id: str) -> dict: - query = {"bool": {"must": [{"match": {"event_type": "bro_conn"}}, {"match": {"uid": conn_id}}]}} - res_json = __es_search__(query) - if res_json['hits']['total']['value'] > 0: - return res_json - - -def do_update(esindex: str, esid: str, tags: str) -> dict: - local_index = esindex.split(":")[1] - query = {"doc": {"tags": tags}} - return __es_update__(index=local_index, es_query=query, es_id=esid) - - -def __es_search__(es_query: dict) -> dict: - if es_user and es_pass: - response = requests.get(f'{esserver}/{quote(search_index)}/_search', json=es_query, - verify=es_verifycert, auth=(es_user, es_pass)) - else: - response = requests.get(f'{esserver}/{quote(search_index)}/_search', json=es_query, - verify=es_verifycert) - return response.json() - - -def __es_update__(index: str, es_query: dict, es_id: str) -> dict: - if es_user and es_pass: - response = requests.post(f'{esserver}/{quote(index)}/_update/{quote(es_id)}?refresh=true', - json=es_query, verify=es_verifycert, auth=(es_user, es_pass)) - else: - response = requests.post(f'{esserver}/{quote(index)}/_update/{quote(es_id)}?refresh=true', - json=es_query, verify=es_verifycert) - return response.json() diff --git a/so-soctopus/so-soctopus/playbook.py b/so-soctopus/so-soctopus/playbook.py deleted file mode 100644 index 40d55d0..0000000 --- 
a/so-soctopus/so-soctopus/playbook.py +++ /dev/null @@ -1,707 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import fileinput -import json -import os -import re -import shutil -import subprocess -import uuid -from time import gmtime, strftime -import sys -import tempfile -import glob -from pathlib import Path - -import requests - -from ruamel.yaml import YAML -from ruamel.yaml.compat import StringIO - -import ruamel.yaml -from config import parser - -yaml = ruamel.yaml.YAML(typ='safe') - -playbook_headers = {'X-Redmine-API-Key': parser.get("playbook", "playbook_key"), 'Content-Type': 'application/json'} -playbook_url = parser.get("playbook", "playbook_url") -playbook_external_url = parser.get("playbook", "playbook_ext_url") -playbook_unit_test_index = parser.get("playbook", "playbook_unit_test_index") -playbook_verifycert = parser.getboolean('playbook', 'playbook_verifycert', fallback=False) - - -es_url = parser.get("es", "es_url") -es_ip = parser.get("es", "es_ip") -es_verifycert = parser.getboolean('es', 'es_verifycert', fallback=False) - -# Moves a community rule into /custom/sigma/ for hash comparison for rule updates -# This function is called when a user selects to disable auto update rules -def play_template_backup(issue_id): - - play_meta = play_metadata(issue_id) - if play_meta['playbook'] == "community": - if play_meta['auto_update_sigma'] == "0": # Do not autoupdate Sigma is checked - source = str(play_meta['sigma_file']) - fileloc = source.rfind('/') - file = source[source.rfind('/') + 1:] - dst = "/SOCtopus/custom/sigma/" + file - shutil.copyfile(source, dst) - else: - source = str(play_meta['sigma_file']) - file = source[source.rfind('/') + 1:] - dst = "/SOCtopus/custom/sigma/" + file - if os.path.exists(dst): - os.remove(dst) - else: - update_payload = {"issue": {"project_id": 3, "tracker": "Play", "custom_fields": [ - {"id": 30, "name": "Auto Update Sigma", "value": 0}]}} #changed adding filename to sigma after importing - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(update_payload), headers=playbook_headers, verify=False) - - return - -# Updates the SMTP template when an SMTP option is changed -def smtp_update(issue_id): - url = f"{playbook_url}/issues/{issue_id}.json" - smtp_tls = "false" - r = requests.get(url, headers=playbook_headers, verify=playbook_verifycert).json() - - - for item in r['issue']['custom_fields']: - if item['name'] == "SMTP Server": - smtp_host = re.sub(r'["\']', '', item['value']) - elif item['name'] == "SMTP Port": - smtp_port = re.sub(r'["\']', '', item['value']) - elif item['name'] == "SMTP TLS Enabled": - if item['value'] == "1": - smtp_tls = "true" - elif item['name'] == "Alert From Email Address": - smtp_from = re.sub(r'["\']', '', item['value']) - elif item['name'] == "Alert Email Address": - smtp_to = re.sub(r'["\']', '', item['value']) - - - f = open("/etc/playbook-rules/generic_email.template", 'r+') - content = f.read() - f.seek(0) - f.truncate() - content = re.sub(r'email:.*', f"email: \"{smtp_to}\"", content.rstrip()) - content = re.sub(r'from_addr:.*', f"from_addr: \"{smtp_from}\"", content.rstrip()) - content = re.sub(r'smtp_host:.*', f"smtp_host: \"{smtp_host}\"", content.rstrip()) - content = re.sub(r'smtp_port:.*', f"smtp_port: {smtp_port}", content.rstrip()) - content = re.sub(r'smtp_ssl:.*', f"smtp_ssl: {smtp_tls}", content.rstrip()) - f.write(content) - f.close() - - - success = smtp_update_rule() - - return - -# Recreates elastalert rule for any play with email notifications 
enabled - this will update the SMTP configuration in the rule -# Called from smtp_update when a SMTP option is changed -def smtp_update_rule(): - plays = [] - - plays = get_plays() - - for play in plays: - if play['email_notifications'] == "1" and play['status'] == "Active": - elastalert_update(play['issue_id']) - - return success - -def get_plays(): - plays = [] - offset = 0 - url = f"{playbook_url}/issues.json?offset=0&tracker_id=1&limit=100" - response = requests.get(url, headers=playbook_headers, verify=False).json() - - for i in response['issues']: - play_meta = play_metadata(i['id']) - plays.append(play_meta) - - while offset < response['total_count']: - offset += 100 - url = f"{playbook_url}/issues.json?offset={offset}&tracker_id=1&limit=100" - response = requests.get(url, headers=playbook_headers, verify=False).json() - print(f"offset: {offset}") - - for i in response['issues']: - play_meta = play_metadata(i['id']) - plays.append(play_meta) - - return plays - -# Imports rules when Sigma Options -> Import Rules is selected in Redmine -# If a rule (matched by rule id) exists, the rule is updated with the imported rule, if not a new rule is created -# Template is moved to /custom/sigma/ as well -def play_import(enable_play): - import_count = 0 - ruleset_path = "/SOCtopus/sigma-import/" - filen = "" - - for filename in Path(ruleset_path).glob('*.yml'): - filen = str(filename) - with open(filename, encoding="utf-8") as fpi2: - raw = fpi2.read() - repo_sigma = yaml.load(raw) - - creation_status = play_create(raw, repo_sigma,"imported", "import", "import", "DRL-1.0", filen, "NA", enable_play) #changed filename added to play_create - if creation_status['play_creation'] == 201: - import_count = import_count + 1 - - import_status = f"{import_count} Play/s Imported Successfully." 
- - return import_status - -# Imports rules when Sigma Options -> Backup is selected - backs up up all non community rules or community rules with auto update disabled -# Backed up to /custom/backup -def play_backup(issue_id): - plays = [] - - plays = get_plays() - - for play in plays: - if play['playbook'] != "community" or play['auto_update_sigma'] == "0": - file = ("/SOCtopus/custom/backup/" + play['title'] + ".yml").replace(" ", "_") - with open(file, 'w') as f: - f.write(play['sigma_raw']) - - update_payload = {"issue": {"subject": "Sigma Options", "project_id": 3, "tracker": "Sigma Options", "custom_fields": [ - {"id": 37, "name": "Backup Custom Sigmas", "value": 0}]}} #changed adding filename to sigma after importing - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(update_payload), headers=playbook_headers, verify=False) - return - -# Removes the update available flag when Sigma Options - Remove update available (all) is run -# If a major Sigma update is implemented on all rules (format change for instance), users may want to run this to remove the udpate available flag on all rules -def play_clear_update_available(issue_id): - plays = [] - - plays = get_plays() - - for play in plays: - if play['update_available'] == "1": - update_payload = {"issue": {"custom_fields": [ - {"id": 31, "name": "Update Available", "value": 0}]}} #changed adding filename to sigma after importing - url = f"{playbook_url}/issues/{play['issue_id']}.json" - r = requests.put(url, data=json.dumps(update_payload), headers=playbook_headers, verify=False) - - update_payload = {"issue": {"custom_fields": [ - {"id": 39, "name": "Clear Update Status (all)", "value": 0}]}} #changed adding filename to sigma after importing - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(update_payload), headers=playbook_headers, verify=False) - - return - -def navigator_update(): - # Get play data from Redmine - url = f"{playbook_url}/issues.json?status_id=3&limit=100" - response_data = requests.get(url, headers=playbook_headers, verify=playbook_verifycert).json() - - technique_payload = [] - for play in response_data['issues']: - for custom_field in play['custom_fields']: - if custom_field['id'] == 15 and (custom_field['value']): - technique_id = custom_field['value'][0] - technique_payload.append( - {"techniqueID": technique_id, "score": 100, "comment": "", "enabled": True, "metadata": []}) - - try: - with open('/etc/playbook/nav_layer_playbook.json') as nav_layer_r: - curr_json = json.load(nav_layer_r) - curr_json['description'] = f'Current Coverage of Playbook - Updated {strftime("%Y-%m-%d %H:%M", gmtime())}' - curr_json['techniques'] = technique_payload - - except FileNotFoundError as e: - curr_json = \ - { - "name": "Playbook", - "domain": "mitre-enterprise", - "description": f'Current Coverage of Playbook - Updated {strftime("%Y-%m-%d %H:%M", gmtime())}', - "filters": { - "stages": ["act"], - "platforms": [ - "windows", - "linux", - "mac" - ] - }, - "sorting": 0, - "viewMode": 0, - "hideDisabled": False, - "techniques": technique_payload, - "gradient": { - "colors": ["#ffffff00", "#66b1ffff"], - "minValue": 0, - "maxValue": 100 - }, - "metadata": [], - "showTacticRowBackground": False, - "tacticRowBackground": "#dddddd", - "selectTechniquesAcrossTactics": False - } - - with open('/etc/playbook/nav_layer_playbook.json', 'w+') as nav_layer_w: - json.dump(curr_json, nav_layer_w) - - - -def elastalert_update(issue_id): - # Get play metadata - 
specifically the raw Sigma - play_meta = play_metadata(issue_id) - - # Generate Sigma metadata - sigma_meta = sigma_metadata(play_meta['sigma_raw'], play_meta['sigma_dict'], play_meta['playid'], play_meta['target_log']) - - play_file = f"/etc/playbook-rules/{play_meta['playid']}.yaml" - - if os.path.exists(play_file): - os.remove(play_file) - - if sigma_meta['level'] == "low": - event_severity = 1 - elif sigma_meta['level'] == "medium": - event_severity = 2 - elif sigma_meta['level'] == "high": - event_severity = 3 - elif sigma_meta['level'] == "critical": - event_severity = 4 - elif sigma_meta['level'] == "": - event_severity = 2 - - if play_meta['group'] != None: - rule_category = play_meta['group'] - elif play_meta['ruleset'] != None: - rule_category = play_meta['ruleset'] - else: - rule_category = "None" - - try: - if sigma_meta['product'] == 'osquery': - shutil.copy('/etc/playbook-rules/osquery.template', play_file) - elif sigma_meta['product'] != 'osquery' and play_meta['email_notifications'] == "1": - shutil.copy('/etc/playbook-rules/generic_email.template', play_file) - else: - shutil.copy('/etc/playbook-rules/generic.template', play_file) - with open(play_file, 'r+') as f: - content = f.read() - f.seek(0) - f.truncate() - # Sub details in the ES_Alerter - play URL, etc - content = re.sub(r'rule\.category:.*', f"rule.category: \"{rule_category}\"", content.rstrip()) - content = re.sub(r'\/6000', f"/{issue_id}", content.rstrip()) - content = re.sub(r'play_title:.\"\"', f"play_title: \"{sigma_meta['title']}\"", content.rstrip()) - content = re.sub(r'play_id:.\"\"', f"play_id: \"{play_meta['playid']}\"", content.rstrip()) - content = re.sub(r'event\.severity:.*', f"event.severity: {event_severity}", content.rstrip()) - content = re.sub(r'sigma_level:.\"\"', f"sigma_level: \"{sigma_meta['level']}\"\n", content.rstrip()) - content = re.sub(r'name:\s\S*', f"name: \"{sigma_meta['title']} - {play_meta['playid']}\"", content.rstrip()) - content = f"{content}\n- eql: >\n {sigma_meta['raw_elastalert']}\n" - f.write(content) - f.close() - - # Check newly-written elastalert config file to make sure it is valid - elastalert_config_status = "invalid" - file = open(play_file, "r") - for line in file: - if re.search('realert', line): - elastalert_config_status = "valid" - - if elastalert_config_status != "valid": - print ("Elastalert rule file invalid - deleting it") - os.remove(play_file) - - except FileNotFoundError: - print("ElastAlert Template File not found") - except: - print("Something else went wrong") - if os.path.exists(play_file): - os.remove(play_file) - - return 200, "success" - - -def elastalert_disable(issue_id): - play = play_metadata(issue_id) - play_file = f"/etc/playbook-rules/{play['playid']}.yaml" - if os.path.exists(play_file): - os.remove(play_file) - return 200, "success" - - -def play_update(issue_id): - # Get play metadata - specifically the raw Sigma - play_meta = play_metadata(issue_id) - - # Generate Sigma metadata - sigma_meta = sigma_metadata(play_meta['sigma_raw'], play_meta['sigma_dict'], play_meta['playid'], play_meta['target_log']) - - payload = {"issue": {"subject": sigma_meta['title'], "project_id": 1, "tracker": "Play", "custom_fields": [ \ - {"id": 1, "name": "Title", "value": sigma_meta['title']}, \ - {"id": 10, "name": "Level", "value": sigma_meta['level']}, \ - {"id": 6, "name": "ElastAlert Config", "value": sigma_meta['esquery']}, \ - {"id": 20, "name": "Product", "value": sigma_meta['product']}, \ - {"id": 3, "name": "Objective", "value": 
sigma_meta['description']}, \ - {"id": 2, "name": "Author", "value": sigma_meta['author']}, \ - {"id": 8, "name": "References", "value": sigma_meta['references']}, \ - {"id": 5, "name": "Analysis", "value": f"{sigma_meta['falsepositives']}{sigma_meta['logfields']}"}, \ - {"id": 15, "name": "Tags", "value": sigma_meta['tags']}]}} - - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(payload), headers=playbook_headers, verify=playbook_verifycert) - - return 'success', 200 - - -def play_metadata(issue_id): - play = dict() - url = f"{playbook_url}/issues/{issue_id}.json" - - r = requests.get(url, headers=playbook_headers, verify=playbook_verifycert).json() - if r['issue']['status']['name'] == "Active": - status = "Active" - else: - status = "Not_Active" - for item in r['issue']['custom_fields']: - if item['name'] == "Sigma": - sigma_raw = item['value'] - elif item['name'] == "PlayID": - play['playid'] = item['value'] - elif item['name'] == "Playbook": - play['playbook'] = item['value'] - elif item['name'] == "Case Analyzers": - play['case_analyzers'] = item['value'] - elif item['name'] == "Rule ID": - play['sigma_id'] = item['value'] - elif item['name'] == "Custom Filter": - play['target_log'] = item['value'] - elif item['name'] == "Ruleset": - play['ruleset'] = item['value'] - elif item['name'] == "Group": - play['group'] = item['value'] - elif item['name'] == "Email Notifications": - play['email_notifications'] = item['value'] - elif item['name'] == "Auto Update Sigma": - play['auto_update_sigma'] = item['value'] - elif item['name'] == "Update Available": - play['update_available'] = item['value'] - elif item['name'] == "Sigma File": - play['sigma_file'] = item['value'] - elif item['name'] == "Sigma File": - play['sigma_file'] = item['value'] - elif item['name'] == "Title": - play['title'] = item['value'] - # Cleanup the Sigma data to get it ready for parsing - sigma_raw = re.sub( - "{{collapse\(View Sigma\)|
<pre>|</pre>
|}}", "", sigma_raw) - sigma_dict = yaml.load(sigma_raw) - - return { - 'issue_id': issue_id, - 'playid': play.get('playid'), - 'sigma_dict': sigma_dict, - 'sigma_raw': sigma_raw, - 'sigma_formatted': f'{{{{collapse(View Sigma)\n
<pre>\n\n{sigma_raw}\n</pre>
\n}}}}', - 'sigma_id': play.get('sigma_id'), - 'playbook': play.get('playbook'), - 'case_analyzers': play.get('case_analyzers'), - 'target_log': play.get('target_log'), - 'ruleset': play.get('ruleset'), - 'group': play.get('group'), - 'email_notifications': play.get('email_notifications'), - 'auto_update_sigma': play.get('auto_update_sigma'), - 'update_available': play.get('update_available'), - 'sigma_file': play.get('sigma_file'), - 'title': play.get('title'), - 'status': status - } - - -def sigmac_generate(sigma): - # Call sigmac tool to generate Elasticsearch config - temp_file = tempfile.NamedTemporaryFile(mode='w+t') - print(sigma, file=temp_file) - temp_file.seek(0) - - sigmac_output = subprocess.run(["sigmac", "-t", "es-eql", "-c", "playbook/securityonion-baseline.yml", "--backend-option", "keyword_whitelist=source.ip,destination.ip,source.port,destination.port,message,rule.uuid", temp_file.name], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='ascii') - - es_query = sigmac_output.stdout.strip() + sigmac_output.stderr.strip() - return es_query - - -def sigma_metadata(sigma_raw, sigma, play_id, custom_condition=""): - play = dict() - - # If there is a custom filter, rewrite the Sigma rule - if custom_condition != "": - sigma_dict = dict() - sigma_dict = yaml.load(sigma_raw) - old_condition = sigma_dict['detection']['condition'] - sigma_dict['detection']['condition'] = f'({old_condition}) and not 1 of sofilter*' - sigma_dict['detection'] = sigma_dict['detection'] | yaml.load(custom_condition) - sigma_raw = sigma_dict - - # Call sigmac tool to generate ElastAlert config - temp_file = tempfile.NamedTemporaryFile(mode='w+t') - print(sigma_raw, file=temp_file) - temp_file.seek(0) - - product = sigma['logsource']['product'] if 'product' in sigma['logsource'] else 'none' - - esquery = subprocess.run(["sigmac", "-t", "es-eql", "-c", "playbook/securityonion-baseline.yml", "--backend-option", "keyword_whitelist=source.ip,destination.ip,source.port,destination.port,message,rule.uuid", temp_file.name], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='ascii') - ea_config = esquery.stdout.strip() - - # Prep ATT&CK Tags - tags = re.findall(r"t\d{4}", ''.join( - sigma.get('tags'))) if sigma.get('tags') else '' - play['tags'] = [element.upper() for element in tags] - - return { - 'references': '\n'.join(sigma.get('references')) if sigma.get('references') else 'none', - 'title': sigma.get('title') if sigma.get('title') else 'none', - 'description': sigma.get('description') if sigma.get('description') else 'none', - 'level': sigma.get('level') if sigma.get('level') else 'none', - 'tags': play['tags'], - 'sigma': f'{{{{collapse(View Sigma)\n
<pre>\n\n{yaml2.dump(sigma)}\n</pre>
\n}}}}', - 'author': sigma.get('author') if sigma.get('author') else 'none', - 'falsepositives': '_False Positives_\n' + '\n'.join(sigma.get('falsepositives')) if sigma.get( - 'falsepositives') else '_False Positives_\n Unknown', - 'logfields': '\n\n_Interesting Log Fields_\n' + '\n'.join(sigma.get('fields')) if sigma.get('fields') else '', - 'esquery': f'{{{{collapse(View ElastAlert Config)\n
<pre>\n\n{ea_config}\n</pre>
\n}}}}', - 'raw_elastalert': ea_config, - 'tasks': sigma.get('tasks'), - 'product': product.lower(), - 'sigid': sigma.get('id') if sigma.get('id') else 'none' - } - - -def play_create(sigma_raw, sigma_dict, playbook="imported", ruleset="", group="", license="", filename="", sigma_url="", enable=False): - # Expects Sigma in dict format - - # Generate a unique ID for the Play - play_id = uuid.uuid4().hex - - # Extract out all the relevant metadata from the Sigma YAML - play = sigma_metadata(sigma_raw, sigma_dict, play_id[0:9]) - - # If ElastAlert config = "", return with an error - if play['raw_elastalert'] == "": - return "Sigmac error when generating ElastAlert config" - play_notes = "Play imported successfully." - #play_status = "6" if play['raw_elastalert'] == "" else "2" - #play_notes = "Play status set to Disabled - Sigmac error when generating ElastAlert config." \ - # if play['raw_elastalert'] == "" else "Play imported successfully." - - # Create the payload - payload = {"issue": {"subject": play['title'], "project_id": 1, "status_id": "2", "tracker": "Play", - "custom_fields": [ - {"id": 1, "name": "Title", "value": play['title']}, - {"id": 13, "name": "Playbook", "value": playbook}, - {"id": 6, "name": "ElastAlert Config", "value": play['esquery']}, - {"id": 10, "name": "Level", "value": play['level']}, - {"id": 20, "name": "Product", "value": play['product']}, - {"id": 3, "name": "Objective", "value": play['description']}, - {"id": 2, "name": "Author", "value": play['author']}, - {"id": 8, "name": "References", "value": play['references']}, - {"id": 5, "name": "Analysis", "value": f"{play['falsepositives']}{play['logfields']}"}, - {"id": 11, "name": "PlayID", "value": play_id[0:9]}, - {"id": 15, "name": "Tags", "value": play['tags']}, - {"id": 12, "name": "Rule ID", "value": play['sigid']}, - {"id": 9, "name": "Sigma", "value": play['sigma']}, - {"id": 18, "name": "Ruleset", "value": ruleset}, - {"id": 19, "name": "Group", "value": group}, - {"id": 26, "name": "License", "value": license}, - {"id": 28, "name": "Sigma URL", "value": sigma_url}, - {"id": 27, "name": "Sigma File", "value": filename}]}} #changed added update of filename field - - - # POST the payload to Redmine to create the Play (ie Redmine issue) - url = f"{playbook_url}/issues.json" - r = requests.post(url, data=json.dumps(payload), headers=playbook_headers, verify=playbook_verifycert) - - # If Play creation was successful, update the Play notes & return the Play URL - if r.status_code == 201: - # Update the Play notes - notes_payload = {"issue": {"notes": play_notes}} - new_issue_id = r.json() - url = f"{playbook_url}/issues/{new_issue_id['issue']['id']}.json" - r = requests.put(url, data=json.dumps(notes_payload), headers=playbook_headers, verify=playbook_verifycert) - # Notate success & Play URL - play_creation = 201 - play_url = f"{playbook_external_url}/issues/{new_issue_id['issue']['id']}" - if enable: - enable_payload = {"issue": {"status_id": "3"}} - enable_play = requests.put(url, data=json.dumps(enable_payload), headers=playbook_headers, verify=playbook_verifycert) - # If Play creation was not successful, return the status code - else: - print("Play Creation Error - " + r.text, file=sys.stderr) - play_creation = r.status_code - play_url = "failed" - - return { - 'play_creation': play_creation, - 'play_url': play_url - } - - -def play_unit_test (issue_id,unit_test_trigger,only_normalize=False): - - # Get Play metadata - play_meta = play_metadata(issue_id) - - if not play_meta['target_log']: - return "No 
Target Log" - - # Get Sigma metadata - sigma_meta = sigma_metadata(play_meta['sigma_raw'], play_meta['sigma_dict'], play_meta['playid']) - - # If needed, normalize the Target Log if the trigger is "Target Log Updated" - if unit_test_trigger == "Target Log Updated": - if not "collapse(View Log)" in play_meta['target_log']: - play_unit_test_normalize_log(play_meta['target_log'],issue_id,sigma_meta['title']) - if only_normalize: - return "only_normalize = True" - - # Insert the Target Log into Elasticsearch - insert_log = play_unit_test_insert_log (play_meta['target_log'],play_meta['playid']) - if insert_log['status_code'] != 201: - play_unit_test_closeout(issue_id,"Failed",unit_test_trigger,f"Target Log insert into Elasticsearch failed: {insert_log['debug'] }") - return - - # Tweak Play Elastalert alert for use with elastalert-test-rule & output to a temp file - newline = '\n' - elastalert_alert = f"es_host: {es_ip}{newline}es_port: 9200{newline}{sigma_meta['raw_elastalert']}{newline}alert: debug" - elastalert_alert = re.sub(r"index: .*", f"index: {playbook_unit_test_index}", elastalert_alert) - - temp_file = tempfile.NamedTemporaryFile(mode='w+t') - print(elastalert_alert, file=temp_file) - temp_file.seek(0) - - # Run elastalert-test-rule - elastalert_output = subprocess.run(["elastalert-test-rule", "--config", "playbook_elastalert_config.yaml", temp_file.name, "--formatted-output"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='ascii') - - if elastalert_output.returncode != 0: - play_unit_test_closeout(issue_id,"Failed",unit_test_trigger,f"Stage - elastalert-test-rule execution failed: {elastalert_output}") - return - - # Cleanup stdout, just leaving the status in JSON format - elastalert_output = json.loads(f"{{{elastalert_output.stdout.strip().split('{', 1)[-1]}") - - if elastalert_output.get('writeback', {}).get('elastalert_error'): - play_unit_test_closeout(issue_id,"Failed",unit_test_trigger,f"Stage - elastalert-test-rule: {elastalert_output['writeback']}") - elif elastalert_output.get('writeback', {}).get('elastalert_status'): - if elastalert_output['writeback']['elastalert_status']['hits'] >= 1: - print ("Passed") - elastalert_status = "Passed" - unit_test_debug = "N/A" - else: - print ("Failed") - elastalert_status = "Failed" - unit_test_debug = f"Stage - elastalert-test-rule: {elastalert_output['writeback']}" - else: - print ("Failed") - elastalert_status = "Failed" - unit_test_debug = f"Stage - elastalert-test-rule: {elastalert_output['writeback']}" - - # Closeout the unit test - play_unit_test_closeout(issue_id,elastalert_status,unit_test_trigger,unit_test_debug) - - return { - 'unit_test_status': elastalert_status - } - -def play_unit_test_normalize_log (target_log, issue_id, play_name): - - normalized_log = f'{{{{collapse(View Log)\n
<pre>\n\n{target_log}\n</pre>
\n}}}}', - normalized_string = ''.join(normalized_log) - - payload = {"issue": {"project_id": 1, "tracker": "Play", "subject":play_name, "custom_fields": [ \ - {"id": 21, "value": normalized_string}]}} - - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(payload), headers=playbook_headers, verify=playbook_verifycert) - - return r - -def play_unit_test_insert_log (target_log, playid): - - now_timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) - - target_log = re.sub("{{collapse\(View Log\)|
<pre>|</pre>
|}}", "",target_log) - target_log = re.sub(r"@timestamp\":.\".*?,", f"@timestamp\": \"{now_timestamp}\",", target_log) - target_log = json.loads(target_log).pop("_source") - - headers = {'Content-Type': 'application/json'} - url = f"http://{es_ip}:9200/{playbook_unit_test_index}/_doc" - r = requests.post(url, data=json.dumps(target_log), headers=headers, verify=es_verifycert) - - return { - 'status_code': r.status_code, - 'debug': r.__dict__ - } - -def play_unit_test_closeout (issue_id, status, unit_test_trigger, unit_test_debug="N/A"): - newline = '\n' - now_timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime()) - play_note = f"Unit Test {status} - {now_timestamp}{newline}Test Triggered by: {unit_test_trigger}{newline}Debug: {unit_test_debug}" - - # Update Play Notes with details of the unit test's outcome - play_update_notes(issue_id,play_note) - - # Update Play Unit-Test field with the status of the unit test (Passed|Failed) - play_update_unit_test_field(issue_id,status) - - return - - -def play_update_notes (issue_id, play_notes): - - notes_payload = {"issue": {"notes": play_notes}} - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(notes_payload), headers=playbook_headers, verify=playbook_verifycert) - - return { - r.status_code - } - -def play_update_unit_test_field (issue_id, unit_test_status): - - payload = {"issue": {"project_id": 1, "tracker": "Play", "custom_fields": [ \ - {"id": 22, "value": unit_test_status}]}} - - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(payload), headers=playbook_headers, verify=playbook_verifycert) - - return { - r.status_code - } - - -def play_update_custom_field (issue_id, field_id, field_value, play_name): - - payload = {"issue": {"project_id": 1, "tracker": "Play", "subject":play_name, "custom_fields": [ \ - {"id": field_id, "value": field_value}]}} - - url = f"{playbook_url}/issues/{issue_id}.json" - r = requests.put(url, data=json.dumps(payload), headers=playbook_headers, verify=playbook_verifycert) - - return { - r.status_code - } - - -class YAMLPB(YAML): - def dump(self, data, stream=None, **kw): - inefficient = False - if stream is None: - inefficient = True - stream = StringIO() - YAML.dump(self, data, stream, **kw) - if inefficient: - return stream.getvalue() - - -yaml2 = YAMLPB() diff --git a/so-soctopus/so-soctopus/playbook/securityonion-baseline.yml b/so-soctopus/so-soctopus/playbook/securityonion-baseline.yml deleted file mode 100644 index 3f18bcf..0000000 --- a/so-soctopus/so-soctopus/playbook/securityonion-baseline.yml +++ /dev/null @@ -1,690 +0,0 @@ -# Config file for use with the following logs on Security Onion -# - Any logs shipped with Elastic Agent -title: SO configs -logsources: - osquery: - product: osquery - index: "*:so-osquery-*" - conditions: - event.module: 'osquery' - windows: - product: windows - index: "*:so-*" - idh-opencanary: - product: opencanary - conditions: - event.module: 'opencanary' - suricata-alerts: - product: suricata - service: alert - conditions: - event.module: 'suricata' - event.dataset: 'alert' - zeek-rdp: - product: zeek - service: rdp - conditions: - event.dataset: 'rdp' - strelka-logs: - product: strelka - conditions: - event.module: 'strelka' - windows-application: - product: windows - service: application - conditions: - winlog.channel: Application - windows-security: - product: windows - service: security - conditions: - winlog.channel: Security - windows-system: - product: windows - service: system - conditions: - 
winlog.channel: System - windows-sysmon: - product: windows - service: sysmon - conditions: - winlog.channel: 'Microsoft-Windows-Sysmon/Operational' - windows-powershell: - product: windows - service: powershell - conditions: - winlog.channel: - - 'Microsoft-Windows-PowerShell/Operational' - - 'PowerShellCore/Operational' - windows-classicpowershell: - product: windows - service: powershell-classic - conditions: - winlog.channel: 'Windows PowerShell' - windows-powershellmodule: - product: windows - category: ps_module - conditions: - winlog.channel: - - 'Microsoft-Windows-PowerShell/Operational' - - 'PowerShellCore/Operational' - event.code: 4103 - windows-powershellscriptblock: - product: windows - category: ps_script - conditions: - winlog.channel: - - 'Microsoft-Windows-PowerShell/Operational' - - 'PowerShellCore/Operational' - event.code: 4104 - windows-defender: - product: windows - service: windefend - conditions: - winlog.channel: 'Microsoft-Windows-Windows Defender/Operational' - windows-printservice-admin: - product: windows - service: printservice-admin - conditions: - winlog.channel: 'Microsoft-Windows-PrintService/Admin' - windows-printservice-operational: - product: windows - service: printservice-operational - conditions: - winlog.channel: 'Microsoft-Windows-PrintService/Operational' - windows-terminalservices-localsessionmanager-operational: - product: windows - service: terminalservices-localsessionmanager - conditions: - winlog.channel: 'Microsoft-Windows-TerminalServices-LocalSessionManager/Operational' - windows-codeintegrity-operational: - product: windows - service: codeintegrity-operational - conditions: - winlog.channel: 'Microsoft-Windows-CodeIntegrity/Operational' - windows-smbclient-security: - product: windows - service: smbclient-security - conditions: - winlog.channel: 'Microsoft-Windows-SmbClient/Security' - windows-applocker: - product: windows - service: applocker - conditions: - winlog.channel: - - 'Microsoft-Windows-AppLocker/MSI and Script' - - 'Microsoft-Windows-AppLocker/EXE and DLL' - - 'Microsoft-Windows-AppLocker/Packaged app-Deployment' - - 'Microsoft-Windows-AppLocker/Packaged app-Execution' - windows-dns-server: - product: windows - service: dns-server - conditions: - winlog.channel: 'DNS Server' - windows-driver-framework: - product: windows - service: driver-framework - conditions: - winlog.channel: 'Microsoft-Windows-DriverFrameworks-UserMode/Operational' - windows-msexchange-management: - product: windows - service: msexchange-management - conditions: - winlog.channel: 'MSExchange Management' - windows-dhcp: - product: windows - service: dhcp - conditions: - winlog.channel: 'Microsoft-Windows-DHCP-Server/Operational' - windows-ntlm: - product: windows - service: ntlm - conditions: - winlog.channel: 'Microsoft-Windows-NTLM/Operational' - windows-firewall-advanced-security: - product: windows - service: firewall-as - conditions: - winlog.channel: 'Microsoft-Windows-Windows Firewall With Advanced Security/Firewall' - windows-bits-client: - product: windows - service: bits-client - conditions: - winlog.channel: 'Microsoft-Windows-Bits-Client/Operational' - windows-security-mitigations: - product: windows - service: security-mitigations - conditions: - winlog.channel: - - 'Microsoft-Windows-Security-Mitigations/Kernel Mode' - - 'Microsoft-Windows-Security-Mitigations/User Mode' - windows-diagnosis: - product: windows - service: diagnosis-scripted - conditions: - winlog.channel: 'Microsoft-Windows-Diagnosis-Scripted/Operational' - 
windows-shell-core: - product: windows - service: shell-core - conditions: - winlog.channel: 'Microsoft-Windows-Shell-Core/Operational' - windows-openssh: - product: windows - service: openssh - conditions: - winlog.channel: 'OpenSSH/Operational' - windows-ldap-debug: - product: windows - service: ldap_debug - conditions: - winlog.channel: 'Microsoft-Windows-LDAP-Client/Debug' - windows-bitlocker: - product: windows - service: bitlocker - conditions: - winlog.channel: 'Microsoft-Windows-BitLocker/BitLocker Management' - windows-vhdmp-operational: - product: windows - service: vhdmp - conditions: - winlog_channel: 'Microsoft-Windows-VHDMP/Operational' - windows-appxdeployment-server: - product: windows - service: appxdeployment-server - conditions: - winlog_channel: 'Microsoft-Windows-AppXDeploymentServer/Operational' - windows-lsa-server: - product: windows - service: lsa-server - conditions: - winlog_channel: 'Microsoft-Windows-LSA/Operational' - windows-appxpackaging-om: - product: windows - service: appxpackaging-om - conditions: - winlog_channel: 'Microsoft-Windows-AppxPackaging/Operational' - windows-dns-client: - product: windows - service: dns-client - conditions: - winlog_channel: 'Microsoft-Windows-DNS Client Events/Operational' - windows-appmodel-runtime: - product: windows - service: appmodel-runtime - conditions: - winlog_channel: 'Microsoft-Windows-AppModel-Runtime/Admin' -defaultindex: "*:so-*" -fieldmappings: - #START: SO Specific Mappings - logtype: event.code - EventID: event.code - Channel: winlog.channel - Protocol: network.transport - SourceIP: destination.ip.keyword - SourceHostname: source.hostname - DestinationHostname: destination.hostname - User: user.name - qid: result.columns.qid - hostname: result.hostname - counter: result.counter - column_name: columns.name - query_name: result.name - username: user.name - uid: user.uid - sid: rule.uuid - answer: answers - query: dns.query.name - src_ip: destination.ip.keyword - src_port: source.port - dst_ip: destination.ip.keyword - dst_port: destination.port - cs-method: http.method - c-uri: http.uri - c-useragent: http.useragent - cs-version: http.version - #END: SO Specfic Mappings - #START: Default WLB/ECS Mappings - Provider_Name: winlog.provider_name - CallingProcessName: winlog.event_data.CallingProcessName - ComputerName: winlog.computer_name - EventType: winlog.event_data.EventType - FailureCode: winlog.event_data.FailureCode - FileName: file.path - HiveName: winlog.event_data.HiveName - ProcessCommandLine: winlog.event_data.ProcessCommandLine - SecurityID: winlog.event_data.SecurityID - Source: winlog.event_data.Source - # Channel: WLAN-Autoconfig AND EventID: 8001 - AuthenticationAlgorithm: winlog.event_data.AuthenticationAlgorithm - BSSID: winlog.event_data.BSSID - BSSType: winlog.event_data.BSSType - CipherAlgorithm: winlog.event_data.CipherAlgorithm - ConnectionId: winlog.event_data.ConnectionId - ConnectionMode: winlog.event_data.ConnectionMode - InterfaceDescription: winlog.event_data.InterfaceDescription - InterfaceGuid: winlog.event_data.InterfaceGuid - OnexEnabled: winlog.event_data.OnexEnabled - PHYType: winlog.event_data.PHYType - ProfileName: winlog.event_data.ProfileName - SSID: winlog.event_data.SSID - Accesses: winlog.event_data.Accesses - ClassName: winlog.event_data.ClassName - ClassId: winlog.event_data.ClassId - DeviceDescription: winlog.event_data.DeviceDescription - # ErrorCode => printservice-admin EventID: 4909 or 808 - ErrorCode: - service=windefend: winlog.event_data.Error\ Code - default: 
winlog.event_data.ErrorCode - FilePath: winlog.event_data.FilePath - # Filename => category: antivirus - Filename: winlog.event_data.Filename - LDAPDisplayName: winlog.event_data.LDAPDisplayName - # Level => Source: MSExchange Control Panel EventID: 4 - Level: winlog.event_data.Level - TargetProcessAddress: winlog.event_data.TargetProcessAddress - # UserName => smbclient-security eventid:31017 - UserName: winlog.event_data.UserName - # - # Sysmon/Operational up to ID 25 - # - RuleName: winlog.event_data.RuleName - ProcessGuid: process.entity_id - ProcessId: process.pid - Image: process.executable - FileVersion: - category=process_creation: process.pe.file_version - category=image_load: file.pe.file_version - default: winlog.event_data.FileVersion - Description: - category=process_creation: process.pe.description - category=image_load: file.pe.description - category=sysmon_error: winlog.event_data.Description - default: winlog.event_data.Description - Product: - category=process_creation: process.pe.product - category=image_load: file.pe.product - default: winlog.event_data.Product - Company: - category=process_creation: process.pe.company - category=image_load: file.pe.company - default: winlog.event_data.Company - OriginalFileName: - category=process_creation: process.pe.original_file_name - category=image_load: file.pe.original_file_name - default: winlog.event_data.OriginalFileName - CommandLine: - category=process_creation: process.command_line - service=security: process.command_line - service=powershell-classic: powershell.command.value - default: process.command_line - CurrentDirectory: process.working_directory - LogonGuid: winlog.event_data.LogonGuid - LogonId: winlog.event_data.LogonId - TerminalSessionId: winlog.event_data.TerminalSessionId - IntegrityLevel: winlog.event_data.IntegrityLevel - ParentProcessGuid: process.parent.entity_id - ParentProcessId: process.parent.pid - ParentImage: process.parent.executable - ParentCommandLine: process.parent.command_line - ParentUser: winlog.event_data.ParentUser #Sysmon 13.30 - SourceUser: winlog.event_data.SourceUser #Sysmon 13.30 - TargetUser: winlog.event_data.TargetUser #Sysmon 13.30 - TargetFilename: file.path - CreationUtcTime: winlog.event_data.CreationUtcTime - PreviousCreationUtcTime: winlog.event_data.PreviousCreationUtcTime - Initiated: - category=network_connection: network.initiated - default: winlog.event_data.Initiated - #SourceIsIpv6: winlog.event_data.SourceIsIpv6 #=gets deleted and not boolean...https://github.com/elastic/beats/blob/71eee76e7cfb8d5b18dfacad64864370ddb14ce7/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js#L278-L279 - SourceIp: destination.ip.keyword - SourcePort: source.port - SourcePortName: winlog.event_data.SourcePortName - #DestinationIsIpv6: winlog.event_data.DestinationIsIpv6 #=gets deleted and not boolean...https://github.com/elastic/beats/blob/71eee76e7cfb8d5b18dfacad64864370ddb14ce7/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js#L278-L279 - DestinationIsIpv6: destination.ipv6 - DestinationIp: destination.ip.keyword - DestinationPort: destination.port - DestinationPortName: network.protocol - State: winlog.event_data.State - Version: winlog.event_data.Version - SchemaVersion: winlog.event_data.SchemaVersion - ImageLoaded: file.path - Signed: file.code_signature.signed - Signature: - category=driver_loaded: file.code_signature.subject_name - category=image_loaded: file.code_signature.subject_name - default: winlog.event_data.Signature - SignatureStatus: 
file.code_signature.status - SourceProcessGuid: process.entity_id - SourceProcessId: process.pid - SourceImage: process.executable - TargetProcessGuid: winlog.event_data.TargetProcessGuid - TargetProcessId: winlog.event_data.TargetProcessId - TargetImage: winlog.event_data.TargetImage - NewThreadId: winlog.event_data.NewThreadId - StartAddress: winlog.event_data.StartAddress - StartModule: winlog.event_data.StartModule - StartFunction: winlog.event_data.StartFunction - Device: file.path - SourceThreadId: process.thread.id - GrantedAccess: winlog.event_data.GrantedAccess - CallTrace: winlog.event_data.CallTrace - TargetObject: registry.path - Details: registry.value - NewName: winlog.event_data.NewName - Configuration: winlog.event_data.Configuration - ConfigurationFileHash: winlog.event_data.ConfigurationFileHash - PipeName: file.name - EventNamespace: winlog.event_data.EventNamespace - Name: winlog.event_data.Name - Query: winlog.event_data.Query - Operation: winlog.event_data.Operation - Type: winlog.event_data.Type - Destination: process.executable - Consumer: winlog.event_data.Consumer - Filter: winlog.event_data.Filter - QueryName: dns.question.name - QueryStatus: sysmon.dns.status - QueryResults: winlog.event_data.QueryResults - IsExecutable: sysmon.file.is_executable - Archived: sysmon.file.archived - Session: winlog.event_data.Session - ClientInfo: winlog.event_data.ClientInfo - # SYSMON Hashes - Hashes: message - # extraction from Hashes NOT a original field but find in some rule - md5: - category=driver_load: hash.md5 - category=image_load: file.hash.md5 - default: process.hash.md5 - sha1: - category=driver_load: hash.sha1 - category=image_load: file.hash.sha1 - default: process.hash.sha1 - sha256: - category=driver_load: hash.sha256 - category=image_load: file.hash.sha256 - default: process.hash.sha256 - Imphash: - category=driver_load: hash.imphash - category=image_load: file.hash.imphash - default: process.pe.imphash - # - # Powershell - # - CommandName: powershell.command.name - CommandPath: powershell.command.path - CommandType: powershell.command.type - EngineVersion: - service=powershell-classic: powershell.engine.version - service=windefend: winlog.event_data.Engine\ Version - default: winlog.event_data.EngineVersion - HostApplication: process.command_line - HostId: process.entity_id - HostName: process.title - HostVersion: - service=powershell-classic: powershell.process.executable_version - default: winlog.event_data.HostVersion - NewEngineState: powershell.engine.new_state - PipelineId: powershell.pipeline_id - PreviousEngineState: powershell.engine.previous_state - RunspaceId: powershell.runspace_id - ScriptName: file.path - SequenceNumber: event.sequence - NewProviderState: powershell.provider.new_state - ProviderName: powershell.provider.name - Payload: winlog.event_data.Payload - ContextInfo: winlog.event_data.ContextInfo - MessageNumber: powershell.sequence - MessageTotal: powershell.total - ScriptBlockText: powershell.file.script_block_text - ScriptBlockId: powershell.file.script_block_id - # - # Security - # - AccessGranted: winlog.event_data.AccessGranted - AccessList: winlog.event_data.AccessList - AccessMask: winlog.event_data.AccessMask - AccessReason: winlog.event_data.AccessReason - AccessRemoved: winlog.event_data.AccessRemoved - AccountDomain: user.domain - AccountExpires: winlog.event_data.AccountExpires - AccountName: user.name - AdditionalInfo: winlog.event_data.AdditionalInfo - AdditionalInfo2: winlog.event_data.AdditionalInfo2 - AllowedToDelegateTo: 
winlog.event_data.AllowedToDelegateTo - AppCorrelationID: winlog.event_data.AppCorrelationID - Application: process.executable - AttributeLDAPDisplayName: winlog.event_data.AttributeLDAPDisplayName - AttributeSyntaxOID: winlog.event_data.AttributeSyntaxOID - AttributeValue: winlog.event_data.AttributeValue - AuditPolicyChanges: winlog.event_data.AuditPolicyChanges - AuditSourceName: winlog.event_data.AuditSourceName - AuthenticationPackageName: winlog.event_data.AuthenticationPackageName - CallerProcessId: winlog.event_data.CallerProcessId - CallerProcessName: winlog.event_data.CallerProcessName - CategoryId: winlog.event_data.CategoryId - CertIssuerName: winlog.event_data.CertIssuerName - CertSerialNumber: winlog.event_data.CertSerialNumber - CertThumbprint: winlog.event_data.CertThumbprint - ClientAddress: destination.ip.keyword - ClientName: source.domain - ClientProcessId: winlog.event_data.ClientProcessId - ClientProcessStartKey: winlog.event_data.ClientProcessStartKey - ComputerAccountChange: winlog.event_data.ComputerAccountChange - CrashOnAuditFailValue: winlog.event_data.CrashOnAuditFailValue - DestAddress: destination.ip.keyword - DestPort: destination.port - Direction: winlog.event_data.Direction - DisplayName: winlog.event_data.DisplayName - DnsHostName: winlog.event_data.DnsHostName - DomainBehaviorVersion: winlog.event_data.DomainBehaviorVersion - DomainName: winlog.event_data.DomainName - DomainPolicyChanged: winlog.event_data.DomainPolicyChanged - DomainSid: winlog.event_data.DomainSid - DSName: winlog.event_data.DSName - DSType: winlog.event_data.DSType - Dummy: winlog.event_data.Dummy - ElevatedToken: winlog.event_data.ElevatedToken - EventSourceId: winlog.event_data.EventSourceId - FailureReason: winlog.event_data.FailureReason - FilterRTID: winlog.event_data.FilterRTID - ForceLogoff: winlog.event_data.ForceLogoff - FQDN: winlog.event_data.FQDN - GroupTypeChange: winlog.event_data.GroupTypeChange - HandleId: winlog.event_data.HandleId - HomeDirectory: winlog.event_data.HomeDirectory - HomePath: winlog.event_data.HomePath - ImagePath: winlog.event_data.ImagePath - ImpersonationLevel: winlog.event_data.ImpersonationLevel - IpAddress: destination.ip.keyword - IpPort: source.port - KeyLength: winlog.event_data.KeyLength - LayerName: winlog.event_data.LayerName - LayerRTID: winlog.event_data.LayerRTID - LmPackageName: winlog.event_data.LmPackageName - LockoutDuration: winlog.event_data.LockoutDuration - LockoutObservationWindow: winlog.event_data.LockoutObservationWindow - LockoutThreshold: winlog.event_data.LockoutThreshold - LogonHours: winlog.event_data.LogonHours - SubjectLogonId: - service=security: winlog.logon.id - default: winlog.event_data.SubjectLogonId - LogonProcessName: winlog.event_data.LogonProcessName - LogonType: winlog.event_data.LogonType - MachineAccountQuota: winlog.event_data.MachineAccountQuota - MandatoryLabel: winlog.event_data.MandatoryLabel - MasterKeyId: winlog.event_data.MasterKeyId - MaxPasswordAge: winlog.event_data.MaxPasswordAge - MemberName: winlog.event_data.MemberName - MemberSid: winlog.event_data.MemberSid - MinPasswordAge: winlog.event_data.MinPasswordAge - MinPasswordLength: winlog.event_data.MinPasswordLength - MixedDomainMode: winlog.event_data.MixedDomainMode - NewProcessId: process.pid - NewProcessName: process.executable - NewSd: winlog.event_data.NewSd - NewTargetUserName: winlog.event_data.NewTargetUserName - NewTime: winlog.event_data.NewTime - NewUacValue: winlog.event_data.NewUacValue - NewValue: - service=windefend: 
winlog.event_data.New\ Value - default: winlog.event_data.NewValue - NewValueType: winlog.event_data.NewValueType - ObjectClass: winlog.event_data.ObjectClass - ObjectDN: winlog.event_data.ObjectDN - ObjectGUID: winlog.event_data.ObjectGUID - ObjectName: winlog.event_data.ObjectName - ObjectServer: winlog.event_data.ObjectServer - ObjectType: winlog.event_data.ObjectType - ObjectValueName: winlog.event_data.ObjectValueName - OemInformation: winlog.event_data.OemInformation - OldSd: winlog.event_data.OldSd - OldTargetUserName: winlog.event_data.OldTargetUserName - OldUacValue: winlog.event_data.OldUacValue - OldValue: - service=windefend: winlog.event_data.Old\ Value - default: winlog.event_data.OldValue - OldValueType: winlog.event_data.OldValueType - OpCorrelationID: winlog.event_data.OpCorrelationID - OperationType: winlog.event_data.OperationType - PackageName: winlog.event_data.PackageName - ParentProcessName: process.parent.name - PasswordHistoryLength: winlog.event_data.PasswordHistoryLength - PasswordLastSet: winlog.event_data.PasswordLastSet - PasswordProperties: winlog.event_data.PasswordProperties - PreAuthType: winlog.event_data.PreAuthType - PreviousTime: winlog.event_data.PreviousTime - PrimaryGroupId: winlog.event_data.PrimaryGroupId - PrivilegeList: winlog.event_data.PrivilegeList - ProcessName: - service=windefend: winlog.event_data.Process\ Name - default: process.executable - ProfilePath: winlog.event_data.ProfilePath - Properties: winlog.event_data.Properties - PuaCount: winlog.event_data.PuaCount - PuaPolicyId: winlog.event_data.PuaPolicyId - RecoveryKeyId: winlog.event_data.RecoveryKeyId - RecoveryServer: winlog.event_data.RecoveryServer - RelativeTargetName: winlog.event_data.RelativeTargetName - RemoteMachineID: winlog.event_data.RemoteMachineID - RemoteUserID: winlog.event_data.RemoteUserID - ResourceAttributes: winlog.event_data.ResourceAttributes - RestrictedAdminMode: winlog.event_data.RestrictedAdminMode - RestrictedSidCount: winlog.event_data.RestrictedSidCount - RpcCallClientLocality: winlog.event_data.RpcCallClientLocality - SamAccountName: winlog.event_data.SamAccountName - ScriptPath: winlog.event_data.ScriptPath - Service: winlog.event_data.Service - ServiceAccount: winlog.event_data.ServiceAccount - ServiceFileName: winlog.event_data.ServiceFileName - ServiceName: - service=security: service.name - default: winlog.event_data.ServiceName - ServicePrincipalNames: winlog.event_data.ServicePrincipalNames - ServiceSid: winlog.event_data.ServiceSid - ServiceStartType: winlog.event_data.ServiceStartType - ServiceType: winlog.event_data.ServiceType - SessionId: winlog.event_data.SessionId - SessionName: winlog.event_data.SessionName - ShareLocalPath: winlog.event_data.ShareLocalPath - ShareName: winlog.event_data.ShareName - SidHistory: winlog.event_data.SidHistory - SidList: winlog.event_data.SidList - SourceAddress: destination.ip.keyword - Status: winlog.event_data.Status - StartType: winlog.event_data.StartType - SubcategoryGuid: winlog.event_data.SubcategoryGuid - SubcategoryId: winlog.event_data.SubcategoryId - SubjectDomainName: - service=security: user.domain - default: winlog.event_data.SubjectDomainName - SubjectUserName: - service=security: user.name - default: winlog.event_data.SubjectUserName - SubjectUserSid: - service=security: user.id - default: winlog.event_data.SubjectUserSid - SubStatus: winlog.event_data.SubStatus - TargetDomainName: user.domain - TargetLinkedLogonId: winlog.event_data.TargetLinkedLogonId - TargetLogonId: - service=security: 
winlog.logon.id - default: winlog.event_data.TargetLogonId - TargetOutboundDomainName: winlog.event_data.TargetOutboundDomainName - TargetOutboundUserName: winlog.event_data.TargetOutboundUserName - TargetServerName: winlog.event_data.TargetServerName - TargetSid: winlog.event_data.TargetSid - TargetUserName: winlog.event_data.TargetUserName - TargetUserSid: winlog.event_data.TargetUserSid - TaskContent: winlog.event_data.TaskContent - TaskName: winlog.event_data.TaskName - TicketEncryptionType: winlog.event_data.TicketEncryptionType - TicketOptions: winlog.event_data.TicketOptions - TokenElevationType: winlog.event_data.TokenElevationType - TransactionId: winlog.event_data.TransactionId - TransmittedServices: winlog.event_data.TransmittedServices - UserAccountControl: winlog.event_data.UserAccountControl - UserParameters: winlog.event_data.UserParameters - UserPrincipalName: winlog.event_data.UserPrincipalName - UserWorkstations: winlog.event_data.UserWorkstations - VirtualAccount: winlog.event_data.VirtualAccount - Workstation: winlog.event_data.Workstation - WorkstationName: source.domain - # - # System - # - DriveName: winlog.event_data.DriveName - DeviceName: winlog.event_data.DeviceName - HeaderFlags: winlog.event_data.HeaderFlags - Severity: winlog.event_data.Severity - Origin: winlog.event_data.Origin - Verb: winlog.event_data.Verb - Outcome: winlog.event_data.Outcome - SampleLength: winlog.event_data.SampleLength - SampleData: winlog.event_data.SampleData - SourceFile: winlog.event_data.SourceFile - SourceLine: winlog.event_data.SourceLine - SourceTag: winlog.event_data.SourceTag - CallStack: winlog.event_data.CallStack - # - # Microsoft-Windows-Windows Defender/Operational - # - ActionID: winlog.event_data.Action\ ID - ActionName: winlog.event_data.Action\ Name - AdditionalActionsID: winlog.event_data.Additional\ Actions\ ID - AdditionalActionsString: winlog.event_data.Additional\ Actions\ String - CategoryID: winlog.event_data.Category\ ID - CategoryName: winlog.event_data.Category\ Name - DetectionID: winlog.event_data.Detection\ ID - DetectionTime: winlog.event_data.Detection\ Time - DetectionUser: winlog.event_data.Detection\ User - ErrorDescription: winlog.event_data.Error\ Description - ExecutionID: winlog.event_data.Execution\ ID - ExecutionName: winlog.event_data.Execution\ Name - FWLink: winlog.event_data.FWLink - OriginID: winlog.event_data.Origin\ ID - OriginName: winlog.event_data.Origin\ Name - Path: winlog.event_data.Path - PostCleanStatus: winlog.event_data.Post\ Clean\ Status - PreExecutionStatus: winlog.event_data.Pre\ Execution\ Status - ProductName: winlog.event_data.Product\ Name - ProductVersion: winlog.event_data.Product\ Version - RemediationUser: winlog.event_data.Remediation\ User - SecurityintelligenceVersion: winlog.event_data.Security\ intelligence\ Version - SeverityID: winlog.event_data.Severity\ ID - SeverityName: winlog.event_data.Severity\ Name - SourceID: winlog.event_data.Source\ ID - SourceName: winlog.event_data.Source\ Name - StatusCode: winlog.event_data.Status\ Code - StatusDescription: winlog.event_data.Status\ Description - ThreatID: winlog.event_data.Threat\ ID - ThreatName: winlog.event_data.Threat\ Name - TypeID: winlog.event_data.Type\ ID - TypeName: winlog.event_data.Type\ Name - # - # Microsoft-Windows-Windows Firewall With Advanced Security/Firewall - # - ApplicationPath: winlog.event_data.ApplicationPath - ModifyingApplication: winlog.event_data.ModifyingApplication - Action: winlog.event_data.Action - #END: Default WLB/ECS 
Mappings diff --git a/so-soctopus/so-soctopus/playbook/securityonion-network.yml b/so-soctopus/so-soctopus/playbook/securityonion-network.yml deleted file mode 100644 index ea4776c..0000000 --- a/so-soctopus/so-soctopus/playbook/securityonion-network.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Config file for network-based Sigma rules for use with Security Onion -title: SO Network -logsources: - bro: - category: dns - index: logstash-bro-* - firewall: - category: firewall - index: logstash-firewall-* - ids: - category: ids - index: logstash-ids-* - ids-snort: - product: snort - index: logstash-ids-* - ids-suricata: - product: suricata - index: logstash-ids-* -defaultindex: logstash-* -fieldmappings: - query: query - answer: answers - src_ip: source_ip - src_port: source_port - dst_ip: destination_ip - dst_port: destination_port diff --git a/so-soctopus/so-soctopus/playbook/sysmon.yml b/so-soctopus/so-soctopus/playbook/sysmon.yml deleted file mode 100644 index 20b0285..0000000 --- a/so-soctopus/so-soctopus/playbook/sysmon.yml +++ /dev/null @@ -1,78 +0,0 @@ -title: Conversion of Generic Rules into Sysmon Specific Rules -order: 10 -logsources: - process_creation: - category: process_creation - product: windows - conditions: - EventID: 1 - rewrite: - product: windows - service: sysmon - network_connection: - category: network_connection - product: windows - conditions: - EventID: 3 - rewrite: - product: windows - service: sysmon - dns_query: - category: dns_query - product: windows - conditions: - EventID: 22 - rewrite: - product: windows - service: sysmon - registry_event: - category: registry_event - product: windows - conditions: - EventID: - - 12 - - 13 - - 14 - rewrite: - product: windows - service: sysmon - file_creation: - category: file_event - product: windows - conditions: - EventID: 11 - rewrite: - product: windows - service: sysmon - process_access: - category: process_access - product: windows - conditions: - EventID: 10 - rewrite: - product: windows - service: sysmon - image_loaded: - category: image_load - product: windows - conditions: - EventID: 7 - rewrite: - product: windows - service: sysmon - driver_loaded: - category: driver_load - product: windows - conditions: - EventID: 6 - rewrite: - product: windows - service: sysmon - process_terminated: - category: process_termination - product: windows - conditions: - EventID: 5 - rewrite: - product: windows - service: sysmon \ No newline at end of file diff --git a/so-soctopus/so-soctopus/playbook_bulk-update.py b/so-soctopus/so-soctopus/playbook_bulk-update.py deleted file mode 100644 index cf5cb0a..0000000 --- a/so-soctopus/so-soctopus/playbook_bulk-update.py +++ /dev/null @@ -1,196 +0,0 @@ -from datetime import datetime -import json -import os -import re -import glob -import time -import hashlib -from pathlib import Path -import urllib3 -import subprocess - -import requests -import ruamel.yaml -from config import parser -import playbook -urllib3.disable_warnings() -yaml = ruamel.yaml.YAML(typ='safe') - -updated_plays = dict() -play_update_counter = 0 -play_new_counter = 0 -play_noupdate_counter = 0 -play_update_available_counter = 0 -plays = [] -offset = 0 - -playbook_headers = {'X-Redmine-API-Key': parser.get( - "playbook", "playbook_key"), 'Content-Type': 'application/json'} -playbook_url = parser.get("playbook", "playbook_url") - - -# Which ruleset categories should be imported / updated? 
-# rulesets = ['application','apt','cloud','compliance','generic','linux','network', 'proxy', 'web', 'windows'] -rulesets = parser.get('playbook', 'playbook_rulesets').split(",") - -############################################################## -# update_play(raw_sigma, repo_sigma, ruleset) -# This function compares the uuid of the current rule -# against the Playbook plays. If there is no match, then it -# creates a new play in Playbook. -# If there is a match, it then compares a hash of the sigma: -# Hash matches --> no update needed -# Hash doesn't match --> update the play in playbook -# inputs: raw sigma, sigma dict, and the playbook name -# returns: the status of the play: update / nop / new -def update_play(raw_sigma, repo_sigma, ruleset, ruleset_group, filename): - sigma_url = filename.replace('/SOCtopus/sigma/', 'https://github.com/Security-Onion-Solutions/sigma/tree/master/') - for play in plays: - if repo_sigma['id'] == play['sigma_id']: # Match sigma UUID - repo_hash = hashlib.sha256( - str(repo_sigma).encode('utf-8')).hexdigest() - playbook_hash = hashlib.sha256( - str(play['sigma_dict']).encode('utf-8')).hexdigest() - if repo_hash != playbook_hash: # Check if hashes match - file = filename[filename.rfind('/')+1:] - backup_path = "/SOCtopus/custom/sigma/" + file - - if os.path.exists(backup_path): # Does the sigma file exist in /SOCtopus/custom/sigma/? If yes, this means Auto-Update is disabled - try: - with open(backup_path, encoding="utf-8") as fpi2: - raw = fpi2.read() - backup_rule = yaml.load(raw) - backup_hash = hashlib.sha256(str(backup_rule).encode('utf-8')).hexdigest() - - except Exception as e: - print('Error - Unable to load backup copy' + str(e)) - - if repo_hash != backup_hash: # Does the sigma repo hash match the backup hash? If not, then an update is available - play_status = "available" - update_payload = {"issue": {"subject": repo_sigma['title'], "project_id": 1, "tracker": "Play", "custom_fields": [ - {"id": 31, "name": "Update Available", "value": "1"}]}} - url = f"{playbook_url}/issues/{play['issue_id']}.json" - r = requests.put(url, data=json.dumps( - update_payload), headers=playbook_headers, verify=False) - playbook.play_template_backup(play['issue_id']) - else: - play_status = "nop" - else: - play_status = "updated" - formatted_sigma = f'{{{{collapse(View Sigma)\n
\n\n{raw_sigma}\n
\n}}}}' - update_payload = {"issue": {"subject": repo_sigma['title'], "project_id": 1, "status": "Disabled", "tracker": "Play", "custom_fields": [ - {"id": 9, "name": "Sigma", "value": formatted_sigma.strip()}, \ - {"id": 28, "name": "Sigma URL", "value": sigma_url.strip()}, \ - {"id": 27, "name": "Sigma File", "value": filename.strip()}]}} - url = f"{playbook_url}/issues/{play['issue_id']}.json" - r = requests.put(url, data=json.dumps( - update_payload), headers=playbook_headers, verify=False) - else: - play_status = "nop" - break - - else: - print('No Current Play - Create New Play in Playbook') - play_status = "new" - creation_status = playbook.play_create(raw_sigma, repo_sigma,"community", ruleset, ruleset_group, "DRL-1.0", filename, sigma_url) - print (creation_status) - - return play_status - - -def rule_update(rulesets): - global play_update_counter - global play_new_counter - global play_noupdate_counter - global play_update_available_counter - ruleset_path = f"./sigma/rules/{rulesets}" - for filename in Path(ruleset_path).glob('**/*.yml'): - filepath = "/SOCtopus/" + str(filename) - if "deprecated" in str(filename): - print(f"\n\n - Not Loading - rule Deprecated - {filename}") - else: - print(f"\n\n{filename}") - sub_group = re.search(rf"{rulesets}\/(.*)\/",str(filename)) - if sub_group != None: - ruleset_group = sub_group.group(1) - else: - ruleset_group = None - - with open(filename, encoding="utf-8") as fpi2: - raw = fpi2.read() - try: - repo_sigma = yaml.load(raw) - #if folder == 'process_creation': - #folder = 'proc' - play_status = update_play(raw, repo_sigma, rulesets, ruleset_group, filepath) - print(play_status) - if play_status == "updated": - play_update_counter += 1 - elif play_status == "new": - play_new_counter += 1 - elif play_status == "nop": - play_noupdate_counter += 1 - elif play_status == "available": - play_update_available_counter += 1 - except Exception as e: - print('Error - Sigma rule skipped \n' + str(e)) - - return - -# Starting up.... 
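Note: the update_play() comments above describe a two-step comparison, first matching on the sigma rule's UUID and then detecting content drift by hashing the parsed rule. The following is a minimal, self-contained sketch of that decision only; it is not part of the removed script, uses placeholder data, and assumes plays shaped like the dicts returned by playbook.play_metadata() (with 'sigma_id' and 'sigma_dict' keys).

    import hashlib
    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')

    def sigma_hash(rule_dict):
        # Same scheme as the script above: hash the str() of the parsed rule.
        return hashlib.sha256(str(rule_dict).encode('utf-8')).hexdigest()

    def classify(repo_rule, plays):
        # repo_rule: parsed sigma rule from the repo; plays: stored Playbook plays.
        for play in plays:
            if repo_rule['id'] != play['sigma_id']:
                continue
            if sigma_hash(repo_rule) == sigma_hash(play['sigma_dict']):
                return 'nop'      # same UUID, same content: nothing to do
            return 'updated'      # same UUID, content drifted: push an update
        return 'new'              # UUID unknown to Playbook: create a new play

    raw = "title: Example Rule\nid: 11111111-2222-3333-4444-555555555555\nlevel: low\n"
    repo_rule = yaml.load(raw)
    plays = [{'sigma_id': repo_rule['id'], 'sigma_dict': yaml.load(raw)}]
    print(classify(repo_rule, plays))  # prints: nop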
-print(f"\n-= Started: {datetime.now()}-=\n") - -print( - f"\n\n-= Creating/Updating Plays based on the following categories: {rulesets} -=\n\n") - -# Get all the current plays from Playbook & parse out metadata -print(f"\n\n-= Parsing current Plays in Playbook -=\n\n") -time.sleep(20) -url = f"{playbook_url}/issues.json?offset=0&tracker_id=1&limit=100" -response = requests.get(url, headers=playbook_headers, verify=False).json() - -for i in response['issues']: - play_meta = playbook.play_metadata(i['id']) - plays.append(play_meta) - -while offset < response['total_count']: - offset += 100 - url = f"{playbook_url}/issues.json?offset={offset}&tracker_id=1&limit=100" - response = requests.get(url, headers=playbook_headers, verify=False).json() - print(f"offset: {offset}") - for i in response['issues']: - play_meta = playbook.play_metadata(i['id']) - plays.append(play_meta) - -print(f"\n-= Parsed Playbook Plays: {len(plays)} -=\n") - - -# Create / Update the community Sigma repo -sigma_repo = f"sigma/README.md" -if os.path.exists(sigma_repo): - git_status = subprocess.run( - ["git", "pull"], stdout=subprocess.PIPE, encoding='ascii',cwd='/SOCtopus/sigma') -else: - git_status = subprocess.run( - ["git", "clone", "https://github.com/SigmaHQ/sigma.git"], stdout=subprocess.PIPE, encoding='ascii') - - -''' -Next, loop through each sigma rule in the folder -Compare the uuid of the current rule against the Playbook plays -If no match, then create a new play in playbook -If there is a match, compare a hash of the sigma: - Hash matches --> no update needed - Hash doesn't match --> update the play in playbook -''' -for ruleset in rulesets: - rule_update(ruleset) - print (ruleset) - -# Finally, print a summary of new or updated plays -summary = ( - f"\n\n-= Update Summary =-\n\nSigma Community Repo:\n {git_status.stdout.strip()}\n\nUpdated Plays: {play_update_counter}\n" - f"Updates Available: {play_update_available_counter}\nNew Plays: {play_new_counter}\nNo Updates Needed: {play_noupdate_counter}\n\nEnabled Rulesets:\n{rulesets}\n") -print (summary) - -print (f"\n\n-= Completed: {datetime.now()}-=\n") diff --git a/so-soctopus/so-soctopus/playbook_elastalert_config.yaml b/so-soctopus/so-soctopus/playbook_elastalert_config.yaml deleted file mode 100644 index 7465c11..0000000 --- a/so-soctopus/so-soctopus/playbook_elastalert_config.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# This is the folder that contains the rule yaml files -# Any .yaml file will be loaded as a rule -rules_folder: /opt/elastalert/rules/ - -# Sets whether or not ElastAlert should recursively descend -# the rules directory - true or false -scan_subdirectories: true - -# Do not disable a rule when an uncaught exception is thrown - -# This setting should be tweaked once the following issue has been fixed -# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98 -disable_rules_on_error: false - -# How often ElastAlert will query Elasticsearch -# The unit can be anything from weeks to seconds -run_every: - minutes: 1 - -# ElastAlert will buffer results from the most recent -# period of time, in case some log sources are not in real time -buffer_time: - minutes: 1 - -# The maximum time between queries for ElastAlert to start at the most recently -# run query. When ElastAlert starts, for each rule, it will search elastalert_metadata -# for the most recently run query and start from that time, unless it is older than -# old_query_limit, in which case it will start from the present time. The default is one week. 
-old_query_limit: - minutes: 5 - -# Sets timeout for connecting to and reading from es_host -es_conn_timeout: 60 - -# The maximum number of documents that will be downloaded from Elasticsearch in -# a single query. The default is 10,000, and if you expect to get near this number, -# consider using use_count_query for the rule. If this limit is reached, ElastAlert -# will scroll through pages the size of max_query_size until processing all results. -max_query_size: 5000 - - -# The index on es_host which is used for metadata storage -# This can be a unmapped index, but it is recommended that you run -# elastalert-create-index to set a mapping -writeback_index: elastalert_status - -# If an alert fails for some reason, ElastAlert will retry -# sending the alert until this time period has elapsed -alert_time_limit: - days: 2 - -index_settings: - shards: 1 - replicas: 0 diff --git a/so-soctopus/so-soctopus/playbook_play-sync.py b/so-soctopus/so-soctopus/playbook_play-sync.py deleted file mode 100644 index 4061a06..0000000 --- a/so-soctopus/so-soctopus/playbook_play-sync.py +++ /dev/null @@ -1,98 +0,0 @@ -from datetime import datetime -import json -import urllib3 -import os -import time - -import requests -from config import parser -import playbook -urllib3.disable_warnings() - -active_elastalert_counter = 0 -inactive_elastalert_counter = 0 -active_plays = [] -inactive_plays = [] -offset = 0 - -playbook_headers = {'X-Redmine-API-Key': parser.get( - "playbook", "playbook_key"), 'Content-Type': 'application/json'} -playbook_url = parser.get("playbook", "playbook_url") - - -print(f"\n-= Started: {datetime.now()}-=\n") - -# Get active plays from Playbook - id = 3 -url = f"{playbook_url}/issues.json?offset=0&tracker_id=1&limit=100&status_id=3" -response = requests.get(url, headers=playbook_headers, verify=False).json() - -for i in response['issues']: - active_plays.append(i) - -while offset < response['total_count']: - offset += 100 - url = f"{playbook_url}/issues.json?offset={offset}&tracker_id=1&limit=100&status_id=3" - response = requests.get(url, headers=playbook_headers, verify=False).json() - print(f"Active offset: {offset}") - for i in response['issues']: - active_plays.append(i) - -print(f"\n-= Parsed Playbook Plays: {len(active_plays)} -=\n") - -for play in active_plays: - - for item in play['custom_fields']: - if item['name'] == "PlayID": - play_id = item['value'] - - print(f"\n\n{play_id}") - - play_file = f"/etc/playbook-rules/{play_id}.yaml" - if os.path.exists(play_file): - print('All Good - Elastalert Config Exists') - else: - print('Warning - Elastalert Config Doesnt Exist') - active_elastalert_counter += 1 - playbook.elastalert_update(play['id']) - time.sleep(.5) - - -# Get inactive plays from Playbook - id = 4 -url = f"{playbook_url}/issues.json?offset=0&tracker_id=1&limit=100&status_id=4" -inactive_response = requests.get(url, headers=playbook_headers, verify=False).json() - -for i in inactive_response['issues']: - inactive_plays.append(i) - -while offset < inactive_response['total_count']: - offset += 100 - url = f"{playbook_url}/issues.json?offset={offset}&tracker_id=1&limit=100&status_id=4" - inactive_response = requests.get(url, headers=playbook_headers, verify=False).json() - print(f"Inactive offset: {offset}") - for i in inactive_response['issues']: - inactive_plays.append(i) - -for play in inactive_plays: - for item in play['custom_fields']: - if item['name'] == "PlayID": - play_id = item['value'] - - print(f"\n\nInactive - {play_id}") - - play_file = 
f"/etc/playbook-rules/{play_id}.yaml" - if os.path.exists(play_file): - print('Inactive Warning - Elastalert Config Exists') - os.remove(play_file) - inactive_elastalert_counter += 1 - -# Refresh Playbook Navigator Layer -playbook.navigator_update() - -print(f"\n\n-= Maintenance Summary =-\n\n" - f"Active Plays: {response['total_count']}" - f"\n-----------------\n" - f"Missing ElastAlert Configs: {active_elastalert_counter}\n" - f"Inactive Plays: {inactive_response['total_count']}\n" - f"-----------------\n" - f"Out of Sync ElastAlert Configs: {inactive_elastalert_counter}" - f"\n\n-= Completed: {datetime.now()}-=\n") diff --git a/so-soctopus/so-soctopus/playbook_play-update.py b/so-soctopus/so-soctopus/playbook_play-update.py deleted file mode 100644 index a56df09..0000000 --- a/so-soctopus/so-soctopus/playbook_play-update.py +++ /dev/null @@ -1,41 +0,0 @@ -from datetime import datetime -import json -import urllib3 -import os -import time - -import requests -from config import parser -import playbook -urllib3.disable_warnings() - -all_plays = [] -offset = 0 - -playbook_headers = {'X-Redmine-API-Key': parser.get( - "playbook", "playbook_key"), 'Content-Type': 'application/json'} -playbook_url = parser.get("playbook", "playbook_url") - - -print(f"\n-= Started: {datetime.now()}-=\n") - -# Get all plays from Playbook -url = f"{playbook_url}/issues.json?offset=0&tracker_id=1&limit=100" -response = requests.get(url, headers=playbook_headers, verify=False).json() - -for i in response['issues']: - all_plays.append(i) - -while offset < response['total_count']: - offset += 100 - url = f"{playbook_url}/issues.json?offset={offset}&tracker_id=1&limit=100" - response = requests.get(url, headers=playbook_headers, verify=False).json() - print(f"Active offset: {offset}") - for i in response['issues']: - all_plays.append(i) - -print(f"\n-= Parsed Playbook Plays: {len(all_plays)} -=\n") - -for play in all_plays: - playbook.play_update(play['id']) - print(f"\nIssue-ID - {play['id']}\n") \ No newline at end of file diff --git a/so-soctopus/so-soctopus/requirements.txt b/so-soctopus/so-soctopus/requirements.txt deleted file mode 100644 index 837afea..0000000 --- a/so-soctopus/so-soctopus/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -urllib3>=1.26.5 -certifi>=2019.11 -flask-bootstrap>=3.3.7.0 -Flask==2.3.2 -Flask-WTF>=1.0.0 -jsonpickle>=1.2 -pymisp>=2.4,<2.5 -requests>=2.31.0 -rt>=2.0,<2.1 -ruamel.yaml>=0.16,<0.17 -sigmatools==0.23.1 -thehive4py>=1.6,<1.7 -Werkzeug>=2.2.3 diff --git a/so-soctopus/so-soctopus/templates/cancel.html b/so-soctopus/so-soctopus/templates/cancel.html deleted file mode 100644 index 10833f5..0000000 --- a/so-soctopus/so-soctopus/templates/cancel.html +++ /dev/null @@ -1,11 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - - - - -{% endblock %} - diff --git a/so-soctopus/so-soctopus/templates/hive.html b/so-soctopus/so-soctopus/templates/hive.html deleted file mode 100644 index 2bd3c2e..0000000 --- a/so-soctopus/so-soctopus/templates/hive.html +++ /dev/null @@ -1,21 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - -
- {{ form.csrf_token }} - - - -
- - -{% endblock %} - diff --git a/so-soctopus/so-soctopus/templates/postresult.html b/so-soctopus/so-soctopus/templates/postresult.html deleted file mode 100644 index ad91cb6..0000000 --- a/so-soctopus/so-soctopus/templates/postresult.html +++ /dev/null @@ -1,16 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - - - {% for key, value in result.items() %} - - - - - {% endfor %} -
{{ key }} {{ value }}
- - -{% endblock %} - diff --git a/so-soctopus/so-soctopus/templates/result.html b/so-soctopus/so-soctopus/templates/result.html deleted file mode 100644 index 4cf4f11..0000000 --- a/so-soctopus/so-soctopus/templates/result.html +++ /dev/null @@ -1,15 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - - ES Result - {% for key in result %} - {% if '@version' not in key %} - {{ key }}: {{ result[key] }}
- {% endif %} - {% endfor %} - {{ esindex }} - - -{% endblock %} - diff --git a/so-soctopus/so-soctopus/templates/strelka.html b/so-soctopus/so-soctopus/templates/strelka.html deleted file mode 100644 index aecba27..0000000 --- a/so-soctopus/so-soctopus/templates/strelka.html +++ /dev/null @@ -1,14 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - - Strelka File Scan - - - -{% endblock %} diff --git a/so-soctopus/so-soctopus/templates/update_event.html b/so-soctopus/so-soctopus/templates/update_event.html deleted file mode 100644 index bd09e44..0000000 --- a/so-soctopus/so-soctopus/templates/update_event.html +++ /dev/null @@ -1,18 +0,0 @@ -{% extends "bootstrap/base.html" %} -{% block content %} - - - Update event - - -
- {{ form.csrf_token }} - {% for key, value in result.items() %} - {{ key }}:
- {% endfor %} - -
- - -{% endblock %} - diff --git a/so-soctopus/so-soctopus/wsgi.py b/so-soctopus/so-soctopus/wsgi.py deleted file mode 100644 index 4f93faf..0000000 --- a/so-soctopus/so-soctopus/wsgi.py +++ /dev/null @@ -1,4 +0,0 @@ -from SOCtopus import app - -if __name__ == "__main__": - app.run() \ No newline at end of file From c7e831c1b728706b9de6da3e378bb0e7a572784f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Aug 2024 11:58:45 -0400 Subject: [PATCH 06/21] Shrink layers --- so-steno/Dockerfile | 15 ++++++++++----- so-suricata/Dockerfile | 21 +++++++++++++-------- so-tcpreplay/Dockerfile | 22 ++++++++++++---------- so-zeek/Dockerfile | 20 +++++++++++--------- 4 files changed, 46 insertions(+), 32 deletions(-) diff --git a/so-steno/Dockerfile b/so-steno/Dockerfile index c5d3bc7..2ab195b 100644 --- a/so-steno/Dockerfile +++ b/so-steno/Dockerfile @@ -18,11 +18,16 @@ FROM ghcr.io/security-onion-solutions/oraclelinux:9 LABEL maintainer="Security Onion Solutions, LLC" LABEL description="Google Stenographer running in a docker for use with Security Onion." -# Common CentOS layer -RUN yum -y install epel-release bash libpcap iproute && \ - yum -y install snappy leveldb tcpdump jq libaio libseccomp golang which openssl && \ - yum -y erase epel-release && yum clean all && rm -rf /var/cache/yum && \ - groupadd -g 941 stenographer && \ +# Common Oracle layer +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute + dnf clean all && rm -rf /var/cache/dnf/* + +# Packages Specific to this Container +RUN dnf -y install snappy leveldb tcpdump jq libaio libseccomp golang which openssl && \ + dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* + +# User configuration +RUN groupadd -g 941 stenographer && \ useradd stenographer -u 941 -g 941 && \ rpm -i https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/stenographer-v101/securityonion-stenographer-v1.0.1.0.rpm && \ chmod 755 /usr/bin/steno* && \ diff --git a/so-suricata/Dockerfile b/so-suricata/Dockerfile index e1dbbd1..dd18f41 100644 --- a/so-suricata/Dockerfile +++ b/so-suricata/Dockerfile @@ -15,9 +15,9 @@ FROM ghcr.io/security-onion-solutions/oraclelinux:9 as builder -RUN yum -y install epel-release && \ +RUN dnf update -y && \ + dnf -y install epel-release && \ dnf config-manager --enable ol9_codeready_builder - RUN dnf -y install oraclelinux-developer-release-el9 RUN dnf repolist RUN dnf -y install autoconf automake diffutils file-devel gcc gcc-c++ git \ @@ -43,10 +43,16 @@ LABEL description="Suricata running in a docker with AF_Packet for use with Secu COPY --from=builder /suricata/suriinstall/ / -RUN yum -y install epel-release bash libpcap iproute && \ - yum -y install luajit libnet jansson libyaml cargo rustc nss nss-devel libmaxminddb && \ - yum -y erase epel-release && yum clean all && rm -rf /var/cache/yum && \ - groupadd --gid 940 suricata && \ +# Common Oracle layer +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute + dnf clean all && rm -rf /var/cache/dnf/* + +# Packages Specific to this Container +RUN dnf -y install luajit libnet jansson libyaml cargo rustc nss nss-devel libmaxminddb && \ + dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* + +# User configuration +RUN groupadd --gid 940 suricata && \ adduser --uid 940 --gid 940 --home-dir /etc/suricata --no-create-home suricata && \ chown -R 940:940 /etc/suricata && \ chown -R 940:940 /var/log/suricata @@ -54,7 +60,6 @@ RUN yum -y install epel-release bash libpcap iproute && \ # Copy 
over the entry script. ADD files/so-suricata.sh /usr/local/sbin/so-suricata.sh -RUN chmod +x /usr/local/sbin/so-suricata.sh -RUN rpm -i https://github.com/axellioinc/fx-libpcap/releases/download/px3_1.9.1-3/fx-libpcap-1.9.1-3.el9.x86_64.rpm +RUN chmod +x /usr/local/sbin/so-suricata.sh && rpm -i https://github.com/axellioinc/fx-libpcap/releases/download/px3_1.9.1-3/fx-libpcap-1.9.1-3.el9.x86_64.rpm ENTRYPOINT ["/usr/local/sbin/so-suricata.sh"] diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index a46598e..df88819 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -21,16 +21,18 @@ LABEL description="Replay PCAPs to sniffing interface(s)" # Copy over tcpreplay - using v4.2.6 instead of 4.3.x because of known bugs: https://github.com/appneta/tcpreplay/issues/557 COPY files/tcpreplay /usr/local/bin/tcpreplay -# Setup our utilities, download the pcap samples, convert them to RPM and install them -RUN yum update -y && \ - yum clean all && dnf config-manager --enable ol9_codeready_builder && dnf -y install oraclelinux-developer-release-el9 && dnf repolist && \ - yum -y install epel-release && \ - yum -y install libpcap && \ - yum -y install rpmrebuild && \ - yum -y install alien && \ - yum -y install wget libnsl && \ -\ -for i in securityonion-samples_20121202-0ubuntu0securityonion4_all.deb securityonion-samples-bro_20170824-1ubuntu1securityonion3_all.deb securityonion-samples-markofu_20130522-0ubuntu0securityonion3_all.deb securityonion-samples-mta_20190514-1ubuntu1securityonion1_all.deb securityonion-samples-shellshock_20140926-0ubuntu0securityonion2_all.deb; do wget https://launchpad.net/~securityonion/+archive/ubuntu/stable/+files/$i; done && \ +# Common Oracle layer +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute + dnf clean all && rm -rf /var/cache/dnf/* + +# Packages Specific to this Container +RUN dnf config-manager --enable ol9_codeready_builder && dnf -y install oraclelinux-developer-release-el9 && dnf repolist && \ + dnf -y install rpmrebuild alien wget libnsl + dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* + +# User configuration + +RUN for i in securityonion-samples_20121202-0ubuntu0securityonion4_all.deb securityonion-samples-bro_20170824-1ubuntu1securityonion3_all.deb securityonion-samples-markofu_20130522-0ubuntu0securityonion3_all.deb securityonion-samples-mta_20190514-1ubuntu1securityonion1_all.deb securityonion-samples-shellshock_20140926-0ubuntu0securityonion2_all.deb; do wget https://launchpad.net/~securityonion/+archive/ubuntu/stable/+files/$i; done && \ \ alien -r *.deb && \ \ diff --git a/so-zeek/Dockerfile b/so-zeek/Dockerfile index f7bd0d5..d59b5b5 100644 --- a/so-zeek/Dockerfile +++ b/so-zeek/Dockerfile @@ -83,13 +83,16 @@ FROM ghcr.io/security-onion-solutions/oraclelinux:9 LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Zeek running in docker for use with Security Onion" -# Common CentOS layer -RUN dnf update -y && \ - dnf -y install epel-release bash findutils libpcap iproute && \ - dnf -y install jemalloc numactl libnl3 libdnet gdb python3 && \ - dnf -y install libunwind-devel && \ - dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf && \ - groupadd --gid 937 zeek && \ +# Common Oracle layer +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute + dnf clean all && rm -rf /var/cache/dnf/* + +# Packages Specific to this Container +RUN dnf -y install findutils jemalloc numactl libnl3 libdnet gdb libunwind-devel && \ + dnf -y erase 
epel-release && dnf clean all && rm -rf /var/cache/dnf/* + +# User configuration +RUN groupadd --gid 937 zeek && \ adduser --uid 937 --gid 937 --home-dir /opt/zeek --no-create-home zeek COPY --from=builder /nsm/zeek /nsm/zeek @@ -98,8 +101,7 @@ COPY --from=builder /usr/local/ssl/ /usr/local/ssl # Copy over the entry script. COPY files/zeek.sh /usr/local/sbin/zeek.sh -RUN chmod +x /usr/local/sbin/zeek.sh -RUN rpm -i https://github.com/axellioinc/fx-libpcap/releases/download/fxlibpcap-1.9.1/fx-libpcap-1.9.1-1.el9.x86_64.rpm +RUN chmod +x /usr/local/sbin/zeek.sh && rpm -i https://github.com/axellioinc/fx-libpcap/releases/download/fxlibpcap-1.9.1/fx-libpcap-1.9.1-1.el9.x86_64.rpm HEALTHCHECK --interval=10m --timeout=2m CMD runuser -u zeek -- /opt/zeek/bin/zeekctl status || (kill -s 15 -1 && (sleep 30; kill -s 9 -1)) From 1fb6d308d9b7a6a43caadbbc711471d0102cb5ec Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Aug 2024 12:16:36 -0400 Subject: [PATCH 07/21] Shrink layers --- so-steno/Dockerfile | 2 +- so-suricata/Dockerfile | 2 +- so-tcpreplay/Dockerfile | 2 +- so-zeek/Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/so-steno/Dockerfile b/so-steno/Dockerfile index 2ab195b..0d726fe 100644 --- a/so-steno/Dockerfile +++ b/so-steno/Dockerfile @@ -19,7 +19,7 @@ LABEL maintainer="Security Onion Solutions, LLC" LABEL description="Google Stenographer running in a docker for use with Security Onion." # Common Oracle layer -RUN dnf update -y && dnf -y install epel-release bash libpcap iproute +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* # Packages Specific to this Container diff --git a/so-suricata/Dockerfile b/so-suricata/Dockerfile index dd18f41..b4005c7 100644 --- a/so-suricata/Dockerfile +++ b/so-suricata/Dockerfile @@ -44,7 +44,7 @@ LABEL description="Suricata running in a docker with AF_Packet for use with Secu COPY --from=builder /suricata/suriinstall/ / # Common Oracle layer -RUN dnf update -y && dnf -y install epel-release bash libpcap iproute +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* # Packages Specific to this Container diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index df88819..1b86107 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -22,7 +22,7 @@ LABEL description="Replay PCAPs to sniffing interface(s)" COPY files/tcpreplay /usr/local/bin/tcpreplay # Common Oracle layer -RUN dnf update -y && dnf -y install epel-release bash libpcap iproute +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* # Packages Specific to this Container diff --git a/so-zeek/Dockerfile b/so-zeek/Dockerfile index d59b5b5..84da3ac 100644 --- a/so-zeek/Dockerfile +++ b/so-zeek/Dockerfile @@ -84,7 +84,7 @@ LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Zeek running in docker for use with Security Onion" # Common Oracle layer -RUN dnf update -y && dnf -y install epel-release bash libpcap iproute +RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* # Packages Specific to this Container From 01381d643fd839cd7bcff8bc734975c94d8da845 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 13 Aug 2024 12:21:56 -0400 Subject: [PATCH 08/21] Update Elastalert to 2.19.0 --- so-elastalert/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/so-elastalert/Dockerfile b/so-elastalert/Dockerfile index a6ba2b7..2bc80af 100644 --- a/so-elastalert/Dockerfile +++ b/so-elastalert/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/jertel/elastalert2/elastalert2:2.12.0 +FROM ghcr.io/jertel/elastalert2/elastalert2:2.19.0 LABEL maintainer "Security Onion Solutions, LLC" ARG GID=933 From aebde8aef16e20caf262153ea974804f0311a4dd Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Aug 2024 12:33:22 -0400 Subject: [PATCH 09/21] Shrink layers --- so-tcpreplay/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index 1b86107..dcefe45 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -27,7 +27,7 @@ RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ # Packages Specific to this Container RUN dnf config-manager --enable ol9_codeready_builder && dnf -y install oraclelinux-developer-release-el9 && dnf repolist && \ - dnf -y install rpmrebuild alien wget libnsl + dnf -y install rpmrebuild alien wget libnsl %% \ dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* # User configuration From eb33af84efab96d556164819cb36536c18828b01 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Aug 2024 13:12:17 -0400 Subject: [PATCH 10/21] Shrink layers --- so-tcpreplay/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index dcefe45..e2f325c 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -27,7 +27,7 @@ RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ # Packages Specific to this Container RUN dnf config-manager --enable ol9_codeready_builder && dnf -y install oraclelinux-developer-release-el9 && dnf repolist && \ - dnf -y install rpmrebuild alien wget libnsl %% \ + dnf -y install rpmrebuild alien wget libnsl && \ dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* # User configuration From 7e85f834177801b594befc12e5805204c139e824 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 13 Aug 2024 13:57:46 -0400 Subject: [PATCH 11/21] Upgrade influx and telegraf --- so-redis/Dockerfile | 2 +- so-telegraf/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/so-redis/Dockerfile b/so-redis/Dockerfile index 1009750..405658e 100644 --- a/so-redis/Dockerfile +++ b/so-redis/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/security-onion-solutions/redis:6-alpine +FROM ghcr.io/security-onion-solutions/redis:7.2.5-alpine LABEL maintainer "Security Onion Solutions, LLC" LABEL description="REDIS running in Docker container for use with Security Onion" RUN addgroup -g 939 socore && adduser -D --uid 939 --ingroup socore socore && \ diff --git a/so-telegraf/Dockerfile b/so-telegraf/Dockerfile index 3bcc053..c49a147 100644 --- a/so-telegraf/Dockerfile +++ b/so-telegraf/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/security-onion-solutions/telegraf:1.28.2-alpine +FROM ghcr.io/security-onion-solutions/telegraf:1.31.3-alpine LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Telegraf running in Docker container for use with Security Onion" From 0c0fc172743b88857c099b293c558264538bbef2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Aug 2024 14:02:37 -0400 Subject: [PATCH 12/21] Shrink layers even more --- so-suricata/Dockerfile | 4 ++-- so-tcpreplay/Dockerfile | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/so-suricata/Dockerfile 
b/so-suricata/Dockerfile index b4005c7..b6f0552 100644 --- a/so-suricata/Dockerfile +++ b/so-suricata/Dockerfile @@ -41,8 +41,6 @@ FROM ghcr.io/security-onion-solutions/oraclelinux:9 LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Suricata running in a docker with AF_Packet for use with Security Onion." -COPY --from=builder /suricata/suriinstall/ / - # Common Oracle layer RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* @@ -51,6 +49,8 @@ RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ RUN dnf -y install luajit libnet jansson libyaml cargo rustc nss nss-devel libmaxminddb && \ dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* +COPY --from=builder /suricata/suriinstall/ / + # User configuration RUN groupadd --gid 940 suricata && \ adduser --uid 940 --gid 940 --home-dir /etc/suricata --no-create-home suricata && \ diff --git a/so-tcpreplay/Dockerfile b/so-tcpreplay/Dockerfile index e2f325c..a8238bf 100644 --- a/so-tcpreplay/Dockerfile +++ b/so-tcpreplay/Dockerfile @@ -18,9 +18,6 @@ FROM ghcr.io/security-onion-solutions/oraclelinux:9 LABEL maintainer="Security Onion Solutions, LLC" LABEL description="Replay PCAPs to sniffing interface(s)" -# Copy over tcpreplay - using v4.2.6 instead of 4.3.x because of known bugs: https://github.com/appneta/tcpreplay/issues/557 -COPY files/tcpreplay /usr/local/bin/tcpreplay - # Common Oracle layer RUN dnf update -y && dnf -y install epel-release bash libpcap iproute && \ dnf clean all && rm -rf /var/cache/dnf/* @@ -30,6 +27,10 @@ RUN dnf config-manager --enable ol9_codeready_builder && dnf -y install oracleli dnf -y install rpmrebuild alien wget libnsl && \ dnf -y erase epel-release && dnf clean all && rm -rf /var/cache/dnf/* + +# Copy over tcpreplay - using v4.2.6 instead of 4.3.x because of known bugs: https://github.com/appneta/tcpreplay/issues/557 +COPY files/tcpreplay /usr/local/bin/tcpreplay + # User configuration RUN for i in securityonion-samples_20121202-0ubuntu0securityonion4_all.deb securityonion-samples-bro_20170824-1ubuntu1securityonion3_all.deb securityonion-samples-markofu_20130522-0ubuntu0securityonion3_all.deb securityonion-samples-mta_20190514-1ubuntu1securityonion1_all.deb securityonion-samples-shellshock_20140926-0ubuntu0securityonion2_all.deb; do wget https://launchpad.net/~securityonion/+archive/ubuntu/stable/+files/$i; done && \ From 854fe080ece5ebccb44fee4944c17539037bce80 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 13 Aug 2024 14:35:41 -0400 Subject: [PATCH 13/21] Update golang and alpine versions --- so-strelka-filestream/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/so-strelka-filestream/Dockerfile b/so-strelka-filestream/Dockerfile index d778746..e7f89e1 100644 --- a/so-strelka-filestream/Dockerfile +++ b/so-strelka-filestream/Dockerfile @@ -1,10 +1,10 @@ -FROM ghcr.io/security-onion-solutions/golang:1.21.5-alpine AS build +FROM ghcr.io/security-onion-solutions/golang:1.22.6-alpine AS build LABEL maintainer "Security Onion Solutions, LLC" ARG STRELKA_RELEASE_VERSION=0.24.01.18 RUN CGO_ENABLED=0 go install github.com/target/strelka/src/go/cmd/strelka-filestream@$STRELKA_RELEASE_VERSION -FROM ghcr.io/security-onion-solutions/alpine +FROM ghcr.io/security-onion-solutions/alpine:3.20.2 COPY --from=build /go/bin/strelka-filestream /usr/local/bin/ RUN addgroup -g 939 strelka && \ adduser -u 939 -G strelka strelka --disabled-password \ From 
7f1474f5bf8e47ee91e3fdf92c7b2d563266178f Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 13 Aug 2024 14:36:23 -0400 Subject: [PATCH 14/21] Update golang and alpine versions --- so-strelka-frontend/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/so-strelka-frontend/Dockerfile b/so-strelka-frontend/Dockerfile index c12e883..f516f8d 100644 --- a/so-strelka-frontend/Dockerfile +++ b/so-strelka-frontend/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/security-onion-solutions/golang:1.21.5-alpine AS build +FROM ghcr.io/security-onion-solutions/golang:1.22.6-alpine AS build LABEL maintainer "Security Onion Solutions, LLC" ARG STRELKA_RELEASE_VERSION=0.24.01.18 @@ -10,7 +10,7 @@ RUN apk add openssl-dev \ librdkafka-dev && \ CGO_ENABLED=1 go install -tags musl github.com/target/strelka/src/go/cmd/strelka-frontend@$STRELKA_RELEASE_VERSION -FROM ghcr.io/security-onion-solutions/alpine +FROM ghcr.io/security-onion-solutions/alpine:3.20.2 COPY --from=build /go/bin/strelka-frontend /usr/local/bin/ From 54256aea0104882150b960b13a7ada319798e311 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 13 Aug 2024 14:36:47 -0400 Subject: [PATCH 15/21] Update golang and alpine versions --- so-strelka-manager/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/so-strelka-manager/Dockerfile b/so-strelka-manager/Dockerfile index 094b3a7..39985b7 100644 --- a/so-strelka-manager/Dockerfile +++ b/so-strelka-manager/Dockerfile @@ -1,10 +1,10 @@ -FROM ghcr.io/security-onion-solutions/golang:1.21.5-alpine AS build +FROM ghcr.io/security-onion-solutions/golang:1.22.6-alpine AS build LABEL maintainer "Security Onion Solutions, LLC" ARG STRELKA_RELEASE_VERSION=0.24.01.18 RUN CGO_ENABLED=0 go install github.com/target/strelka/src/go/cmd/strelka-manager@$STRELKA_RELEASE_VERSION -FROM ghcr.io/security-onion-solutions/alpine +FROM ghcr.io/security-onion-solutions/alpine:3.20.2 COPY --from=build /go/bin/strelka-manager /usr/local/bin/ RUN addgroup -g 939 strelka && \ adduser -u 939 -G strelka strelka --disabled-password \ From fff5c99f9fe6f48267e7050fda4bca55337c67b4 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 14 Aug 2024 11:04:35 -0400 Subject: [PATCH 16/21] Standardize base image --- so-idh/Dockerfile | 2 +- so-idstools/Dockerfile | 11 ++--------- so-pcaptools/Dockerfile | 10 +++++----- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/so-idh/Dockerfile b/so-idh/Dockerfile index 69cb09f..8f90e3a 100644 --- a/so-idh/Dockerfile +++ b/so-idh/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.12.4-slim +FROM ghcr.io/security-onion-solutions/python:3.12.5-slim WORKDIR /root/ diff --git a/so-idstools/Dockerfile b/so-idstools/Dockerfile index b095704..d371a9a 100644 --- a/so-idstools/Dockerfile +++ b/so-idstools/Dockerfile @@ -13,21 +13,16 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-FROM ghcr.io/security-onion-solutions/python:3-alpine +FROM ghcr.io/security-onion-solutions/python:3.12.5-slim LABEL maintainer "Security Onion Solutions, LLC" -LABEL description="IDSTools for downloading rules" +LABEL description="IDSTools for downloading NIDS rules" ARG GID=939 ARG UID=939 ARG USERNAME=socore ARG VERSION=0.6.3 -RUN apk add --no-cache --virtual .build-deps\ - shadow - -RUN apk add --no-cache bash - RUN mkdir -p /opt/so/idstools/bin && mkdir /opt/so/idstools/etc COPY entrypoint.sh /opt/so/idstools/bin RUN chmod +x /opt/so/idstools/bin/entrypoint.sh && chown -R ${UID}:${GID} /opt/so/idstools @@ -39,8 +34,6 @@ RUN groupadd --gid ${GID} ${USERNAME} && \ #RUN pip install https://github.com/jasonish/py-idstools/archive/master.zip RUN pip install https://github.com/Security-Onion-Solutions/py-idstools/archive/master.zip -RUN apk del .build-deps - USER ${USERNAME} WORKDIR /opt/so/idstools/bin ENTRYPOINT ["./entrypoint.sh"] diff --git a/so-pcaptools/Dockerfile b/so-pcaptools/Dockerfile index 1cf8c21..2b3566e 100644 --- a/so-pcaptools/Dockerfile +++ b/so-pcaptools/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/security-onion-solutions/python:3-slim AS builder +FROM ghcr.io/security-onion-solutions/python:3.12.5-slim AS builder LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Tools for use with PCAP & EVTX files" @@ -14,7 +14,7 @@ RUN wget http://f00l.de/pcapfix/pcapfix-${PCAPFIX_VERSION}.tar.gz && \ make && \ make install -FROM ghcr.io/security-onion-solutions/python:3-slim +FROM ghcr.io/security-onion-solutions/python:3.12.5-slim ADD evtx_calc_timestamps.sh /evtx_calc_timestamps.sh RUN chmod +x /evtx_calc_timestamps.sh @@ -22,11 +22,11 @@ RUN chmod +x /evtx_calc_timestamps.sh ADD timeshift.py /timeshift.py RUN chmod +x /timeshift.py -# libwiretap11 is required for capinfo libaries +# libwiretap13 is required for capinfo libaries # jq is required for evtx timestamp script -RUN apt-get update && apt-get install -y --no-install-recommends --force-yes libwiretap11 git jq && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y --no-install-recommends --force-yes libwiretap13 git jq && rm -rf /var/lib/apt/lists/* RUN pip3 install evtx elasticsearch==7.17.1 tqdm orjson importlib_metadata RUN pip3 install evtx2es --no-dependencies COPY --from=builder /usr/bin/pcapfix /usr/bin/ -COPY --from=builder /usr/bin/capinfos /usr/bin/ +COPY --from=builder /usr/bin/capinfos /usr/bin/ \ No newline at end of file From 62f8f9dd8778558b9e9bacb4420ff8134eda331a Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 14 Aug 2024 12:21:12 -0400 Subject: [PATCH 17/21] Update influx and agent builder --- so-elastic-agent-builder/Dockerfile | 2 +- so-elastic-agent-builder/source/go.mod | 19 +++++++++-------- so-elastic-agent-builder/source/go.sum | 28 +++++++++++++++++--------- so-influxdb/Dockerfile | 7 ++++--- 4 files changed, 32 insertions(+), 24 deletions(-) diff --git a/so-elastic-agent-builder/Dockerfile b/so-elastic-agent-builder/Dockerfile index bf97641..83cd7d7 100644 --- a/so-elastic-agent-builder/Dockerfile +++ b/so-elastic-agent-builder/Dockerfile @@ -2,7 +2,7 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. 
-FROM golang:1.20-alpine +FROM ghcr.io/security-onion-solutions/golang:1.22.6-alpine RUN mkdir /workspace ADD source /workspace diff --git a/so-elastic-agent-builder/source/go.mod b/so-elastic-agent-builder/source/go.mod index c6dcb23..8a56699 100644 --- a/so-elastic-agent-builder/source/go.mod +++ b/so-elastic-agent-builder/source/go.mod @@ -8,16 +8,15 @@ require ( ) require ( - github.com/andybalholm/brotli v1.0.1 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect - github.com/go-logfmt/logfmt v0.4.0 // indirect - github.com/golang/snappy v0.0.2 // indirect - github.com/klauspost/compress v1.11.4 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect - github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect - github.com/nwaples/rardecode v1.1.0 // indirect - github.com/pierrec/lz4/v4 v4.1.2 // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/ulikunitz/xz v0.5.9 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/nwaples/rardecode v1.1.3 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect ) diff --git a/so-elastic-agent-builder/source/go.sum b/so-elastic-agent-builder/source/go.sum index 07ac261..eaea0a0 100644 --- a/so-elastic-agent-builder/source/go.sum +++ b/so-elastic-agent-builder/source/go.sum @@ -1,5 +1,6 @@ -github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= @@ -15,24 +16,27 @@ github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj6 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.4 h1:kz40R/YWls3iqT9zX9AHN3WoVsrAWVyui5sxuLqiXqU= github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -44,14 +48,17 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo= github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= -github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc= +github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/fastuuid 
v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -71,8 +78,9 @@ github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eN github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= diff --git a/so-influxdb/Dockerfile b/so-influxdb/Dockerfile index 9ebde4a..161218f 100644 --- a/so-influxdb/Dockerfile +++ b/so-influxdb/Dockerfile @@ -1,9 +1,10 @@ -ARG INFLUX_VERSION=2.7.1 +ARG INFLUX_VERSION=2.7.9 -FROM ghcr.io/security-onion-solutions/ubuntu:23.04 as builder +FROM ghcr.io/security-onion-solutions/ubuntu:24.10 as builder ARG INFLUX_VERSION -ARG NODE_VERSION=v20.8.1 +#v20.x is LTS +ARG NODE_VERSION=v20.16.0 ARG NODE_ARCH=linux-x64 RUN apt update -y && apt install -y git wget xz-utils From a1983c3bcb2bf9b790656329854c6b808095f505 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 14 Aug 2024 14:34:50 -0400 Subject: [PATCH 18/21] update kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- so-kafka/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/so-kafka/Dockerfile b/so-kafka/Dockerfile index dce11b3..43f2149 100644 --- a/so-kafka/Dockerfile +++ b/so-kafka/Dockerfile @@ -3,12 +3,12 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -FROM ghcr.io/security-onion-solutions/kafka:3.7.0 +FROM ghcr.io/security-onion-solutions/kafka:3.8.0 LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Kafka running in a docker container for use with Security Onion" -ARG JOLOKIA_VERSION=2.0.2 +ARG JOLOKIA_VERSION=2.1.0 ARG JOLOKIA_DOWNLOAD=https://github.com/jolokia/jolokia/releases/download/v${JOLOKIA_VERSION}/jolokia-${JOLOKIA_VERSION}-bin.tar.gz WORKDIR /opt From 133a1dae432bba64ade1d5500e097cbed5412054 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 15 Aug 2024 10:01:32 -0400 Subject: [PATCH 19/21] Update base images --- so-nginx/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/so-nginx/Dockerfile b/so-nginx/Dockerfile index b56f7b8..001c2eb 100644 --- a/so-nginx/Dockerfile +++ b/so-nginx/Dockerfile @@ -14,7 +14,7 @@ # along with this program. If not, see . 
# Navigator build stage -FROM ghcr.io/security-onion-solutions/node:22.4.1-alpine as navigator-builder +FROM ghcr.io/security-onion-solutions/node:22.6.0-alpine as navigator-builder ARG NAVIGATOR_VERSION=4.9.1 @@ -31,7 +31,7 @@ RUN sed -i '//d' ./dist/index.html ################################### -FROM nginx:1.26.1-alpine +FROM ghcr.io/security-onion-solutions/nginx:1.26.1-alpine HEALTHCHECK --interval=5m --timeout=3s CMD curl --fail http://localhost/ || exit 1 LABEL maintainer "Security Onion Solutions, LLC" LABEL description="Security Onion Core Functions Docker" From 177268eb7cc7dce0974184eef1ea62341e6202a8 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 15 Aug 2024 14:01:27 -0400 Subject: [PATCH 20/21] Use apt upgrade to update packages --- so-strelka-backend/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/so-strelka-backend/Dockerfile b/so-strelka-backend/Dockerfile index 622577a..7109054 100644 --- a/so-strelka-backend/Dockerfile +++ b/so-strelka-backend/Dockerfile @@ -30,7 +30,8 @@ RUN mkdir /strelka && \ mkdir /etc/strelka && \ mkdir /tmp/strelka && \ mkdir /var/log/strelka && \ - apt -y update && \ + apt -y update && \ + apt -y upgrade && \ apt install git -y && \ git clone -b $STRELKA_RELEASE_VERSION https://github.com/target/strelka /tmp/strelka && \ cp -fr /tmp/strelka/pyproject.toml /strelka/ && \ From 353cab38e637cf3789359b3ea8965db40974b083 Mon Sep 17 00:00:00 2001 From: Jorge Reyes <94730068+reyesj2@users.noreply.github.com> Date: Tue, 27 Aug 2024 11:21:19 -0400 Subject: [PATCH 21/21] Update Dockerfile --- so-zeek/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/so-zeek/Dockerfile b/so-zeek/Dockerfile index 84da3ac..915692b 100644 --- a/so-zeek/Dockerfile +++ b/so-zeek/Dockerfile @@ -20,7 +20,7 @@ RUN dnf -y install dnf-plugins-core && \ dnf update -y && \ dnf -y install epel-release bash libpcap iproute wget cmake swig && \ dnf -y install jemalloc numactl libnl3 libdnet gdb git && \ - dnf -y install libpcap-devel openssl-devel zlib-devel jemalloc-devel python3-devel kernel-devel kernel-headers && \ + dnf -y install libpcap-devel openssl-devel zlib-devel jemalloc-devel python3-devel python3 kernel-devel kernel-headers && \ dnf group install -y "Development Tools" && \ yum install -y glibc-common && \ pip3 install GitPython semantic-version requests && \